// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#include "hclge_trace.h"

#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT		256U
#define HCLGE_BUF_MUL_BY		2
#define HCLGE_BUF_DIV_BY		2
#define NEED_RESERVE_TC_NUM		2
#define BUF_MAX_PERCENT			100
#define BUF_RESERVE_PERCENT		90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

#define HCLGE_LINK_STATUS_MS		10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);
static int hclge_update_port_info(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ TUNNEL_PACKET, 1 },
};
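/* Flow director tuple descriptors: key width in bits, key encoding type and
 * the offsets of the tuple value and mask inside struct hclge_fd_rule. An
 * offset of -1 means the tuple has no per-rule storage in the driver.
 */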
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 */
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}
static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				 int num, bool is_special)
{
	int i;

	trace_hclge_pf_cmd_send(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
							i, num);
	}
}

static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_pf_cmd_get(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
						       i, num);
	}
}
static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
	.trace_cmd_send = hclge_trace_cmd_send,
	.trace_cmd_get = hclge_trace_cmd_get,
};
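/* Update MAC statistics using the legacy fixed-size command, for firmware
 * that cannot report the number of statistics registers.
 */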
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	/* The first desc has a 64-bit header, so data size need to minus 1 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}
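/* Update the complete MAC statistics set; the number of descriptors is
 * derived from the register count reported by the firmware.
 */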
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC		4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so need to consider it */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}
int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}
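/* Count the statistics entries that the current firmware actually supports,
 * based on the number of MAC statistics registers it reports.
 */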
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count += 1;

	return count;
}
static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}
static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				   const struct hclge_comm_stats_str strs[],
				   int size, u8 **data)
{
	u32 i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		ethtool_puts(data, strs[i].desc);
	}
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
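/* Return the number of ethtool strings for the requested string set and,
 * for ETH_SS_TEST, refresh the loopback capability flags on the handle.
 */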
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		if (hdev->ae_dev->dev_specs.hilink_version !=
		    HCLGE_HILINK_H60) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 **data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	const char *str;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
				       size, data);
		hclge_comm_tqps_get_strings(handle, data);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			str = hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL];
			ethtool_puts(data, str);
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			str = hns3_nic_test_strs[HNAE3_LOOP_APP];
			ethtool_puts(data, str);
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			str = hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES];
			ethtool_puts(data, str);
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			str = hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES];
			ethtool_puts(data, str);
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			str = hns3_nic_test_strs[HNAE3_LOOP_PHY];
			ethtool_puts(data, str);
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	do {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS,
					   true);
		req = (struct hclge_func_status_cmd *)desc.data;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
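/* Map each MAC speed value to the corresponding bit(s) in the firmware
 * reported speed ability bitmap.
 */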
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}
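/* Tables translating speed ability bits into ethtool link modes for the
 * SR, LR, CR and KR media variants.
 */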
static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};
static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
		if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
		if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
		if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
		if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, don't need to convert by speed */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}
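/* Build the supported link mode bitmap for a fiber port from the speed
 * ability reported by the firmware.
 */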
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BITS)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BITS)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BITS)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
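/* Extract the static PF configuration (TC number, MAC address, RSS sizes,
 * speed ability, buffer sizes, ...) from the configuration descriptors.
 */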
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT		4096
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
	 * power of 2, instead of reading out directly. This would
	 * be more flexible for future changes and expansions.
	 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
	 * it does not make sense if PF's field is 0. In this case, PF and VF
	 * has the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be united by 4 bytes when send to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
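/* Fall back to compile-time device specifications for devices whose
 * firmware cannot report them.
 */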
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM			8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
	ae_dev->dev_specs.hilink_version = req1->hilink_version;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
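/* Shrink queue and descriptor resources when running in a kdump kernel. */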
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}
	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
	hdev->hw.mac.req_duplex = DUPLEX_FULL;

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
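/* Allocate the task queue pair (TQP) array and initialize the register and
 * optional device-memory base address of every queue.
 */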
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
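/* Assign up to num_tqps unallocated hardware TQPs to the vport and derive
 * its RSS size from the allocated queues and available MSI vectors.
 */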
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

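/* hclge_knic_setup() fills in the kinfo of a vport (descriptor numbers,
 * rx buffer length, tx spare buffer size) and then assigns TQPs to it.
 */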
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_comm_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

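/* Vport 0 is the PF's own NIC handle; hclge_vport_setup() below wires a
 * vport's hnae3 handle to the hardware and sets up its queues.
 */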
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
		    MAX_NUMNODES);
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->port_base_vlan_cfg.tbl_sta = true;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

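/* Packet buffer management: the helpers below carve the on-chip packet
 * buffer into per-TC TX buffers, per-TC RX private buffers and a shared
 * RX buffer, then program the result into hardware.
 */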
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is unit by 128 byte */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

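/* hclge_is_rx_buf_ok() checks whether the remaining RX buffer (rx_all minus
 * the private buffers already allocated) can still hold a shared buffer
 * large enough for the enabled TCs, and if so computes the shared buffer
 * size together with its high/low waterlines and per-TC thresholds.
 */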
1996 static bool hclge_is_rx_buf_ok(struct hclge_dev
*hdev
,
1997 struct hclge_pkt_buf_alloc
*buf_alloc
,
2000 u32 shared_buf_min
, shared_buf_tc
, shared_std
, hi_thrd
, lo_thrd
;
2001 u32 tc_num
= hclge_get_tc_num(hdev
);
2002 u32 shared_buf
, aligned_mps
;
2006 aligned_mps
= roundup(hdev
->mps
, HCLGE_BUF_SIZE_UNIT
);
2008 if (hnae3_dev_dcb_supported(hdev
))
2009 shared_buf_min
= HCLGE_BUF_MUL_BY
* aligned_mps
+
2012 shared_buf_min
= aligned_mps
+ HCLGE_NON_DCB_ADDITIONAL_BUF
2013 + hdev
->dv_buf_size
;
2015 shared_buf_tc
= tc_num
* aligned_mps
+ aligned_mps
;
2016 shared_std
= roundup(max_t(u32
, shared_buf_min
, shared_buf_tc
),
2017 HCLGE_BUF_SIZE_UNIT
);
2019 rx_priv
= hclge_get_rx_priv_buff_alloced(buf_alloc
);
2020 if (rx_all
< rx_priv
+ shared_std
)
2023 shared_buf
= rounddown(rx_all
- rx_priv
, HCLGE_BUF_SIZE_UNIT
);
2024 buf_alloc
->s_buf
.buf_size
= shared_buf
;
2025 if (hnae3_dev_dcb_supported(hdev
)) {
2026 buf_alloc
->s_buf
.self
.high
= shared_buf
- hdev
->dv_buf_size
;
2027 buf_alloc
->s_buf
.self
.low
= buf_alloc
->s_buf
.self
.high
2028 - roundup(aligned_mps
/ HCLGE_BUF_DIV_BY
,
2029 HCLGE_BUF_SIZE_UNIT
);
2031 buf_alloc
->s_buf
.self
.high
= aligned_mps
+
2032 HCLGE_NON_DCB_ADDITIONAL_BUF
;
2033 buf_alloc
->s_buf
.self
.low
= aligned_mps
;
2036 if (hnae3_dev_dcb_supported(hdev
)) {
2037 hi_thrd
= shared_buf
- hdev
->dv_buf_size
;
2039 if (tc_num
<= NEED_RESERVE_TC_NUM
)
2040 hi_thrd
= hi_thrd
* BUF_RESERVE_PERCENT
2044 hi_thrd
= hi_thrd
/ tc_num
;
2046 hi_thrd
= max_t(u32
, hi_thrd
, HCLGE_BUF_MUL_BY
* aligned_mps
);
2047 hi_thrd
= rounddown(hi_thrd
, HCLGE_BUF_SIZE_UNIT
);
2048 lo_thrd
= hi_thrd
- aligned_mps
/ HCLGE_BUF_DIV_BY
;
2050 hi_thrd
= aligned_mps
+ HCLGE_NON_DCB_ADDITIONAL_BUF
;
2051 lo_thrd
= aligned_mps
;
2054 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
2055 buf_alloc
->s_buf
.tc_thrd
[i
].low
= lo_thrd
;
2056 buf_alloc
->s_buf
.tc_thrd
[i
].high
= hi_thrd
;
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

2124 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev
*hdev
,
2125 struct hclge_pkt_buf_alloc
*buf_alloc
)
2127 u32 rx_all
= hdev
->pkt_buf_size
- hclge_get_tx_buff_alloced(buf_alloc
);
2128 int no_pfc_priv_num
= hclge_get_no_pfc_priv_num(hdev
, buf_alloc
);
2131 /* let the last to be cleared first */
2132 for (i
= HCLGE_MAX_TC_NUM
- 1; i
>= 0; i
--) {
2133 struct hclge_priv_buf
*priv
= &buf_alloc
->priv_buf
[i
];
2134 unsigned int mask
= BIT((unsigned int)i
);
2136 if (hdev
->hw_tc_map
& mask
&&
2137 !(hdev
->tm_info
.hw_pfc_map
& mask
)) {
2138 /* Clear the no pfc TC private buffer */
2146 if (hclge_is_rx_buf_ok(hdev
, buf_alloc
, rx_all
) ||
2147 no_pfc_priv_num
== 0)
2151 return hclge_is_rx_buf_ok(hdev
, buf_alloc
, rx_all
);
2154 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev
*hdev
,
2155 struct hclge_pkt_buf_alloc
*buf_alloc
)
2157 u32 rx_all
= hdev
->pkt_buf_size
- hclge_get_tx_buff_alloced(buf_alloc
);
2158 int pfc_priv_num
= hclge_get_pfc_priv_num(hdev
, buf_alloc
);
2161 /* let the last to be cleared first */
2162 for (i
= HCLGE_MAX_TC_NUM
- 1; i
>= 0; i
--) {
2163 struct hclge_priv_buf
*priv
= &buf_alloc
->priv_buf
[i
];
2164 unsigned int mask
= BIT((unsigned int)i
);
2166 if (hdev
->hw_tc_map
& mask
&&
2167 hdev
->tm_info
.hw_pfc_map
& mask
) {
2168 /* Reduce the number of pfc TC with private buffer */
2176 if (hclge_is_rx_buf_ok(hdev
, buf_alloc
, rx_all
) ||
2181 return hclge_is_rx_buf_ok(hdev
, buf_alloc
, rx_all
);
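/* hclge_only_alloc_priv_buff() tries a layout with no shared buffer: the
 * remaining RX buffer is split evenly between the enabled TCs (with a
 * reserve percentage when only a few TCs are enabled) and is accepted only
 * if each per-TC slice stays above a minimum derived from dv_buf_size.
 */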
2184 static int hclge_only_alloc_priv_buff(struct hclge_dev
*hdev
,
2185 struct hclge_pkt_buf_alloc
*buf_alloc
)
2187 #define COMPENSATE_BUFFER 0x3C00
2188 #define COMPENSATE_HALF_MPS_NUM 5
2189 #define PRIV_WL_GAP 0x1800
2191 u32 rx_priv
= hdev
->pkt_buf_size
- hclge_get_tx_buff_alloced(buf_alloc
);
2192 u32 tc_num
= hclge_get_tc_num(hdev
);
2193 u32 half_mps
= hdev
->mps
>> 1;
2198 rx_priv
= rx_priv
/ tc_num
;
2200 if (tc_num
<= NEED_RESERVE_TC_NUM
)
2201 rx_priv
= rx_priv
* BUF_RESERVE_PERCENT
/ BUF_MAX_PERCENT
;
2203 min_rx_priv
= hdev
->dv_buf_size
+ COMPENSATE_BUFFER
+
2204 COMPENSATE_HALF_MPS_NUM
* half_mps
;
2205 min_rx_priv
= round_up(min_rx_priv
, HCLGE_BUF_SIZE_UNIT
);
2206 rx_priv
= round_down(rx_priv
, HCLGE_BUF_SIZE_UNIT
);
2207 if (rx_priv
< min_rx_priv
)
2210 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
2211 struct hclge_priv_buf
*priv
= &buf_alloc
->priv_buf
[i
];
2218 if (!(hdev
->hw_tc_map
& BIT(i
)))
2222 priv
->buf_size
= rx_priv
;
2223 priv
->wl
.high
= rx_priv
- hdev
->dv_buf_size
;
2224 priv
->wl
.low
= priv
->wl
.high
- PRIV_WL_GAP
;
2227 buf_alloc
->s_buf
.buf_size
= 0;
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

2270 static int hclge_rx_priv_buf_alloc(struct hclge_dev
*hdev
,
2271 struct hclge_pkt_buf_alloc
*buf_alloc
)
2273 struct hclge_rx_priv_buff_cmd
*req
;
2274 struct hclge_desc desc
;
2278 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RX_PRIV_BUFF_ALLOC
, false);
2279 req
= (struct hclge_rx_priv_buff_cmd
*)desc
.data
;
2281 /* Alloc private buffer TCs */
2282 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
2283 struct hclge_priv_buf
*priv
= &buf_alloc
->priv_buf
[i
];
2286 cpu_to_le16(priv
->buf_size
>> HCLGE_BUF_UNIT_S
);
2288 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B
);
2292 cpu_to_le16((buf_alloc
->s_buf
.buf_size
>> HCLGE_BUF_UNIT_S
) |
2293 (1 << HCLGE_TC0_PRI_BUF_EN_B
));
2295 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2297 dev_err(&hdev
->pdev
->dev
,
2298 "rx private buffer alloc cmd failed %d\n", ret
);
2303 static int hclge_rx_priv_wl_config(struct hclge_dev
*hdev
,
2304 struct hclge_pkt_buf_alloc
*buf_alloc
)
2306 struct hclge_rx_priv_wl_buf
*req
;
2307 struct hclge_priv_buf
*priv
;
2308 struct hclge_desc desc
[2];
2312 for (i
= 0; i
< 2; i
++) {
2313 hclge_cmd_setup_basic_desc(&desc
[i
], HCLGE_OPC_RX_PRIV_WL_ALLOC
,
2315 req
= (struct hclge_rx_priv_wl_buf
*)desc
[i
].data
;
2317 /* The first descriptor set the NEXT bit to 1 */
2319 desc
[i
].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
2321 desc
[i
].flag
&= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
2323 for (j
= 0; j
< HCLGE_TC_NUM_ONE_DESC
; j
++) {
2324 u32 idx
= i
* HCLGE_TC_NUM_ONE_DESC
+ j
;
2326 priv
= &buf_alloc
->priv_buf
[idx
];
2327 req
->tc_wl
[j
].high
=
2328 cpu_to_le16(priv
->wl
.high
>> HCLGE_BUF_UNIT_S
);
2329 req
->tc_wl
[j
].high
|=
2330 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B
));
2332 cpu_to_le16(priv
->wl
.low
>> HCLGE_BUF_UNIT_S
);
2333 req
->tc_wl
[j
].low
|=
2334 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B
));
2338 /* Send 2 descriptor at one time */
2339 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
2341 dev_err(&hdev
->pdev
->dev
,
2342 "rx private waterline config cmd failed %d\n",
2347 static int hclge_common_thrd_config(struct hclge_dev
*hdev
,
2348 struct hclge_pkt_buf_alloc
*buf_alloc
)
2350 struct hclge_shared_buf
*s_buf
= &buf_alloc
->s_buf
;
2351 struct hclge_rx_com_thrd
*req
;
2352 struct hclge_desc desc
[2];
2353 struct hclge_tc_thrd
*tc
;
2357 for (i
= 0; i
< 2; i
++) {
2358 hclge_cmd_setup_basic_desc(&desc
[i
],
2359 HCLGE_OPC_RX_COM_THRD_ALLOC
, false);
2360 req
= (struct hclge_rx_com_thrd
*)&desc
[i
].data
;
2362 /* The first descriptor set the NEXT bit to 1 */
2364 desc
[i
].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
2366 desc
[i
].flag
&= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
2368 for (j
= 0; j
< HCLGE_TC_NUM_ONE_DESC
; j
++) {
2369 tc
= &s_buf
->tc_thrd
[i
* HCLGE_TC_NUM_ONE_DESC
+ j
];
2371 req
->com_thrd
[j
].high
=
2372 cpu_to_le16(tc
->high
>> HCLGE_BUF_UNIT_S
);
2373 req
->com_thrd
[j
].high
|=
2374 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B
));
2375 req
->com_thrd
[j
].low
=
2376 cpu_to_le16(tc
->low
>> HCLGE_BUF_UNIT_S
);
2377 req
->com_thrd
[j
].low
|=
2378 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B
));
2382 /* Send 2 descriptors at one time */
2383 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
2385 dev_err(&hdev
->pdev
->dev
,
2386 "common threshold config cmd failed %d\n", ret
);
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

2415 int hclge_buffer_alloc(struct hclge_dev
*hdev
)
2417 struct hclge_pkt_buf_alloc
*pkt_buf
;
2420 pkt_buf
= kzalloc(sizeof(*pkt_buf
), GFP_KERNEL
);
2424 ret
= hclge_tx_buffer_calc(hdev
, pkt_buf
);
2426 dev_err(&hdev
->pdev
->dev
,
2427 "could not calc tx buffer size for all TCs %d\n", ret
);
2431 ret
= hclge_tx_buffer_alloc(hdev
, pkt_buf
);
2433 dev_err(&hdev
->pdev
->dev
,
2434 "could not alloc tx buffers %d\n", ret
);
2438 ret
= hclge_rx_buffer_calc(hdev
, pkt_buf
);
2440 dev_err(&hdev
->pdev
->dev
,
2441 "could not calc rx priv buffer size for all TCs %d\n",
2446 ret
= hclge_rx_priv_buf_alloc(hdev
, pkt_buf
);
2448 dev_err(&hdev
->pdev
->dev
, "could not alloc rx priv buffer %d\n",
2453 if (hnae3_dev_dcb_supported(hdev
)) {
2454 ret
= hclge_rx_priv_wl_config(hdev
, pkt_buf
);
2456 dev_err(&hdev
->pdev
->dev
,
2457 "could not configure rx private waterline %d\n",
2462 ret
= hclge_common_thrd_config(hdev
, pkt_buf
);
2464 dev_err(&hdev
->pdev
->dev
,
2465 "could not configure common threshold %d\n",
2471 ret
= hclge_common_wl_config(hdev
, pkt_buf
);
2473 dev_err(&hdev
->pdev
->dev
,
2474 "could not configure common waterline %d\n", ret
);
2481 static int hclge_init_roce_base_info(struct hclge_vport
*vport
)
2483 struct hnae3_handle
*roce
= &vport
->roce
;
2484 struct hnae3_handle
*nic
= &vport
->nic
;
2485 struct hclge_dev
*hdev
= vport
->back
;
2487 roce
->rinfo
.num_vectors
= vport
->back
->num_roce_msi
;
2489 if (hdev
->num_msi
< hdev
->num_nic_msi
+ hdev
->num_roce_msi
)
2492 roce
->rinfo
.base_vector
= hdev
->num_nic_msi
;
2494 roce
->rinfo
.netdev
= nic
->kinfo
.netdev
;
2495 roce
->rinfo
.roce_io_base
= hdev
->hw
.hw
.io_base
;
2496 roce
->rinfo
.roce_mem_base
= hdev
->hw
.hw
.mem_base
;
2498 roce
->pdev
= nic
->pdev
;
2499 roce
->ae_algo
= nic
->ae_algo
;
2500 bitmap_copy(roce
->numa_node_mask
.bits
, nic
->numa_node_mask
.bits
,
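/* Interrupt vectors: hclge_init_msi() below allocates MSI/MSI-X vectors
 * (at least HNAE3_MIN_VECTOR_NUM) and the per-vector bookkeeping arrays;
 * vector 0 is later reserved for the misc (mailbox/reset/error) interrupt.
 */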
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
	{HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
	{HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
	{HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
	{HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
	{HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
	{HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
	{HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
	{HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
};

static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
		if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
			*speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
			return 0;
		}
	}

	return -EINVAL;
}

2583 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev
*hdev
, int speed
,
2584 u8 duplex
, u8 lane_num
)
2586 struct hclge_config_mac_speed_dup_cmd
*req
;
2587 struct hclge_desc desc
;
2591 req
= (struct hclge_config_mac_speed_dup_cmd
*)desc
.data
;
2593 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_SPEED_DUP
, false);
2596 hnae3_set_bit(req
->speed_dup
, HCLGE_CFG_DUPLEX_B
, 1);
2598 ret
= hclge_convert_to_fw_speed(speed
, &speed_fw
);
2600 dev_err(&hdev
->pdev
->dev
, "invalid speed (%d)\n", speed
);
2604 hnae3_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
, HCLGE_CFG_SPEED_S
,
2606 hnae3_set_bit(req
->mac_change_fec_en
, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B
,
2608 req
->lane_num
= lane_num
;
2610 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2612 dev_err(&hdev
->pdev
->dev
,
2613 "mac speed/duplex config cmd failed %d.\n", ret
);
2620 int hclge_cfg_mac_speed_dup(struct hclge_dev
*hdev
, int speed
, u8 duplex
, u8 lane_num
)
2622 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
2625 duplex
= hclge_check_speed_dup(duplex
, speed
);
2626 if (!mac
->support_autoneg
&& mac
->speed
== speed
&&
2627 mac
->duplex
== duplex
&& (mac
->lane_num
== lane_num
|| lane_num
== 0))
2630 ret
= hclge_cfg_mac_speed_dup_hw(hdev
, speed
, duplex
, lane_num
);
2634 hdev
->hw
.mac
.speed
= speed
;
2635 hdev
->hw
.mac
.duplex
= duplex
;
2637 hdev
->hw
.mac
.lane_num
= lane_num
;
2642 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle
*handle
, int speed
,
2643 u8 duplex
, u8 lane_num
)
2645 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2646 struct hclge_dev
*hdev
= vport
->back
;
2649 ret
= hclge_cfg_mac_speed_dup(hdev
, speed
, duplex
, lane_num
);
2654 hdev
->hw
.mac
.req_speed
= speed
;
2655 hdev
->hw
.mac
.req_duplex
= duplex
;
2660 static int hclge_set_autoneg_en(struct hclge_dev
*hdev
, bool enable
)
2662 struct hclge_config_auto_neg_cmd
*req
;
2663 struct hclge_desc desc
;
2667 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_AN_MODE
, false);
2669 req
= (struct hclge_config_auto_neg_cmd
*)desc
.data
;
2671 hnae3_set_bit(flag
, HCLGE_MAC_CFG_AN_EN_B
, 1U);
2672 req
->cfg_an_cmd_flag
= cpu_to_le32(flag
);
2674 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2676 dev_err(&hdev
->pdev
->dev
, "auto neg set cmd failed %d.\n",
2682 static int hclge_set_autoneg(struct hnae3_handle
*handle
, bool enable
)
2684 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2685 struct hclge_dev
*hdev
= vport
->back
;
2687 if (!hdev
->hw
.mac
.support_autoneg
) {
2689 dev_err(&hdev
->pdev
->dev
,
2690 "autoneg is not supported by current port\n");
2697 return hclge_set_autoneg_en(hdev
, enable
);
2700 static int hclge_get_autoneg(struct hnae3_handle
*handle
)
2702 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2703 struct hclge_dev
*hdev
= vport
->back
;
2704 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
2707 return phydev
->autoneg
;
2709 return hdev
->hw
.mac
.autoneg
;
2712 static int hclge_restart_autoneg(struct hnae3_handle
*handle
)
2714 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2715 struct hclge_dev
*hdev
= vport
->back
;
2718 dev_dbg(&hdev
->pdev
->dev
, "restart autoneg\n");
2720 ret
= hclge_notify_client(hdev
, HNAE3_DOWN_CLIENT
);
2723 return hclge_notify_client(hdev
, HNAE3_UP_CLIENT
);
2726 static int hclge_halt_autoneg(struct hnae3_handle
*handle
, bool halt
)
2728 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2729 struct hclge_dev
*hdev
= vport
->back
;
2731 if (hdev
->hw
.mac
.support_autoneg
&& hdev
->hw
.mac
.autoneg
)
2732 return hclge_set_autoneg_en(hdev
, !halt
);
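/* FEC statistics: the firmware returns the counters over several
 * descriptors (HCLGE_FEC_STATS_CMD_NUM); the parse helpers below accumulate
 * the RS/BASE-R block counters and the per-lane counters into
 * hdev->fec_stats.
 */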
2737 static void hclge_parse_fec_stats_lanes(struct hclge_dev
*hdev
,
2738 struct hclge_desc
*desc
, u32 desc_len
)
2740 u32 lane_size
= HCLGE_FEC_STATS_MAX_LANES
* 2;
2745 for (i
= 0; i
< lane_size
; i
++) {
2746 if (data_index
>= HCLGE_DESC_DATA_LEN
) {
2751 if (desc_index
>= desc_len
)
2754 hdev
->fec_stats
.per_lanes
[i
] +=
2755 le32_to_cpu(desc
[desc_index
].data
[data_index
]);
2760 static void hclge_parse_fec_stats(struct hclge_dev
*hdev
,
2761 struct hclge_desc
*desc
, u32 desc_len
)
2763 struct hclge_query_fec_stats_cmd
*req
;
2765 req
= (struct hclge_query_fec_stats_cmd
*)desc
[0].data
;
2767 hdev
->fec_stats
.base_r_lane_num
= req
->base_r_lane_num
;
2768 hdev
->fec_stats
.rs_corr_blocks
+=
2769 le32_to_cpu(req
->rs_fec_corr_blocks
);
2770 hdev
->fec_stats
.rs_uncorr_blocks
+=
2771 le32_to_cpu(req
->rs_fec_uncorr_blocks
);
2772 hdev
->fec_stats
.rs_error_blocks
+=
2773 le32_to_cpu(req
->rs_fec_error_blocks
);
2774 hdev
->fec_stats
.base_r_corr_blocks
+=
2775 le32_to_cpu(req
->base_r_fec_corr_blocks
);
2776 hdev
->fec_stats
.base_r_uncorr_blocks
+=
2777 le32_to_cpu(req
->base_r_fec_uncorr_blocks
);
2779 hclge_parse_fec_stats_lanes(hdev
, &desc
[1], desc_len
- 1);
2782 static int hclge_update_fec_stats_hw(struct hclge_dev
*hdev
)
2784 struct hclge_desc desc
[HCLGE_FEC_STATS_CMD_NUM
];
2788 for (i
= 0; i
< HCLGE_FEC_STATS_CMD_NUM
; i
++) {
2789 hclge_cmd_setup_basic_desc(&desc
[i
], HCLGE_OPC_QUERY_FEC_STATS
,
2791 if (i
!= (HCLGE_FEC_STATS_CMD_NUM
- 1))
2792 desc
[i
].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
2795 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_FEC_STATS_CMD_NUM
);
2799 hclge_parse_fec_stats(hdev
, desc
, HCLGE_FEC_STATS_CMD_NUM
);
2804 static void hclge_update_fec_stats(struct hclge_dev
*hdev
)
2806 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(hdev
->pdev
);
2809 if (!hnae3_ae_dev_fec_stats_supported(ae_dev
) ||
2810 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING
, &hdev
->state
))
2813 ret
= hclge_update_fec_stats_hw(hdev
);
2815 dev_err(&hdev
->pdev
->dev
,
2816 "failed to update fec stats, ret = %d\n", ret
);
2818 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING
, &hdev
->state
);
2821 static void hclge_get_fec_stats_total(struct hclge_dev
*hdev
,
2822 struct ethtool_fec_stats
*fec_stats
)
2824 fec_stats
->corrected_blocks
.total
= hdev
->fec_stats
.rs_corr_blocks
;
2825 fec_stats
->uncorrectable_blocks
.total
=
2826 hdev
->fec_stats
.rs_uncorr_blocks
;
2829 static void hclge_get_fec_stats_lanes(struct hclge_dev
*hdev
,
2830 struct ethtool_fec_stats
*fec_stats
)
2834 if (hdev
->fec_stats
.base_r_lane_num
== 0 ||
2835 hdev
->fec_stats
.base_r_lane_num
> HCLGE_FEC_STATS_MAX_LANES
) {
2836 dev_err(&hdev
->pdev
->dev
,
2837 "fec stats lane number(%llu) is invalid\n",
2838 hdev
->fec_stats
.base_r_lane_num
);
2842 for (i
= 0; i
< hdev
->fec_stats
.base_r_lane_num
; i
++) {
2843 fec_stats
->corrected_blocks
.lanes
[i
] =
2844 hdev
->fec_stats
.base_r_corr_per_lanes
[i
];
2845 fec_stats
->uncorrectable_blocks
.lanes
[i
] =
2846 hdev
->fec_stats
.base_r_uncorr_per_lanes
[i
];
2850 static void hclge_comm_get_fec_stats(struct hclge_dev
*hdev
,
2851 struct ethtool_fec_stats
*fec_stats
)
2853 u32 fec_mode
= hdev
->hw
.mac
.fec_mode
;
2856 case BIT(HNAE3_FEC_RS
):
2857 case BIT(HNAE3_FEC_LLRS
):
2858 hclge_get_fec_stats_total(hdev
, fec_stats
);
2860 case BIT(HNAE3_FEC_BASER
):
2861 hclge_get_fec_stats_lanes(hdev
, fec_stats
);
2864 dev_err(&hdev
->pdev
->dev
,
2865 "fec stats is not supported by current fec mode(0x%x)\n",
2871 static void hclge_get_fec_stats(struct hnae3_handle
*handle
,
2872 struct ethtool_fec_stats
*fec_stats
)
2874 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2875 struct hclge_dev
*hdev
= vport
->back
;
2876 u32 fec_mode
= hdev
->hw
.mac
.fec_mode
;
2878 if (fec_mode
== BIT(HNAE3_FEC_NONE
) ||
2879 fec_mode
== BIT(HNAE3_FEC_AUTO
) ||
2880 fec_mode
== BIT(HNAE3_FEC_USER_DEF
))
2883 hclge_update_fec_stats(hdev
);
2885 hclge_comm_get_fec_stats(hdev
, fec_stats
);
2888 static int hclge_set_fec_hw(struct hclge_dev
*hdev
, u32 fec_mode
)
2890 struct hclge_config_fec_cmd
*req
;
2891 struct hclge_desc desc
;
2894 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_FEC_MODE
, false);
2896 req
= (struct hclge_config_fec_cmd
*)desc
.data
;
2897 if (fec_mode
& BIT(HNAE3_FEC_AUTO
))
2898 hnae3_set_bit(req
->fec_mode
, HCLGE_MAC_CFG_FEC_AUTO_EN_B
, 1);
2899 if (fec_mode
& BIT(HNAE3_FEC_RS
))
2900 hnae3_set_field(req
->fec_mode
, HCLGE_MAC_CFG_FEC_MODE_M
,
2901 HCLGE_MAC_CFG_FEC_MODE_S
, HCLGE_MAC_FEC_RS
);
2902 if (fec_mode
& BIT(HNAE3_FEC_LLRS
))
2903 hnae3_set_field(req
->fec_mode
, HCLGE_MAC_CFG_FEC_MODE_M
,
2904 HCLGE_MAC_CFG_FEC_MODE_S
, HCLGE_MAC_FEC_LLRS
);
2905 if (fec_mode
& BIT(HNAE3_FEC_BASER
))
2906 hnae3_set_field(req
->fec_mode
, HCLGE_MAC_CFG_FEC_MODE_M
,
2907 HCLGE_MAC_CFG_FEC_MODE_S
, HCLGE_MAC_FEC_BASER
);
2909 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2911 dev_err(&hdev
->pdev
->dev
, "set fec mode failed %d.\n", ret
);
2916 static int hclge_set_fec(struct hnae3_handle
*handle
, u32 fec_mode
)
2918 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2919 struct hclge_dev
*hdev
= vport
->back
;
2920 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
2923 if (fec_mode
&& !(mac
->fec_ability
& fec_mode
)) {
2924 dev_err(&hdev
->pdev
->dev
, "unsupported fec mode\n");
2928 ret
= hclge_set_fec_hw(hdev
, fec_mode
);
2932 mac
->user_fec_mode
= fec_mode
| BIT(HNAE3_FEC_USER_DEF
);
2936 static void hclge_get_fec(struct hnae3_handle
*handle
, u8
*fec_ability
,
2939 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2940 struct hclge_dev
*hdev
= vport
->back
;
2941 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
2944 *fec_ability
= mac
->fec_ability
;
2946 *fec_mode
= mac
->fec_mode
;
2949 static int hclge_mac_init(struct hclge_dev
*hdev
)
2951 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
2954 hdev
->support_sfp_query
= true;
2956 if (!test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
))
2957 hdev
->hw
.mac
.duplex
= HCLGE_MAC_FULL
;
2959 if (hdev
->hw
.mac
.support_autoneg
) {
2960 ret
= hclge_set_autoneg_en(hdev
, hdev
->hw
.mac
.autoneg
);
2965 if (!hdev
->hw
.mac
.autoneg
) {
2966 ret
= hclge_cfg_mac_speed_dup_hw(hdev
, hdev
->hw
.mac
.req_speed
,
2967 hdev
->hw
.mac
.req_duplex
,
2968 hdev
->hw
.mac
.lane_num
);
2975 if (mac
->user_fec_mode
& BIT(HNAE3_FEC_USER_DEF
)) {
2976 ret
= hclge_set_fec_hw(hdev
, mac
->user_fec_mode
);
2981 ret
= hclge_set_mac_mtu(hdev
, hdev
->mps
);
2983 dev_err(&hdev
->pdev
->dev
, "set mtu failed ret=%d\n", ret
);
2987 ret
= hclge_set_default_loopback(hdev
);
2991 ret
= hclge_buffer_alloc(hdev
);
2993 dev_err(&hdev
->pdev
->dev
,
2994 "allocate buffer fail, ret=%d\n", ret
);
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
		hdev->last_mbx_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
	}
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
		hdev->last_rst_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
	}
}

static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}

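/* Link status helpers: hclge_get_mac_phy_link() reports link down while the
 * device is DOWN or while an attached PHY is not running, and only then
 * queries the MAC link status from firmware.
 */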
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	return 0;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*link_status = HCLGE_LINK_STATUS_DOWN;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
		return 0;

	return hclge_get_mac_link_status(hdev, link_status);
}

3068 static void hclge_push_link_status(struct hclge_dev
*hdev
)
3070 struct hclge_vport
*vport
;
3074 for (i
= 0; i
< pci_num_vf(hdev
->pdev
); i
++) {
3075 vport
= &hdev
->vport
[i
+ HCLGE_VF_VPORT_START_NUM
];
3077 if (!test_bit(HCLGE_VPORT_STATE_ALIVE
, &vport
->state
) ||
3078 vport
->vf_info
.link_state
!= IFLA_VF_LINK_STATE_AUTO
)
3081 ret
= hclge_push_vf_link_status(vport
);
3083 dev_err(&hdev
->pdev
->dev
,
3084 "failed to push link status to vf%u, ret = %d\n",
3090 static void hclge_update_link_status(struct hclge_dev
*hdev
)
3092 struct hnae3_handle
*handle
= &hdev
->vport
[0].nic
;
3093 struct hnae3_client
*client
= hdev
->nic_client
;
3100 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING
, &hdev
->state
))
3103 ret
= hclge_get_mac_phy_link(hdev
, &state
);
3105 clear_bit(HCLGE_STATE_LINK_UPDATING
, &hdev
->state
);
3109 if (state
!= hdev
->hw
.mac
.link
) {
3110 hdev
->hw
.mac
.link
= state
;
3111 if (state
== HCLGE_LINK_STATUS_UP
)
3112 hclge_update_port_info(hdev
);
3114 client
->ops
->link_status_change(handle
, state
);
3115 hclge_config_mac_tnl_int(hdev
, state
);
3117 if (test_bit(HCLGE_STATE_ROCE_REGISTERED
, &hdev
->state
)) {
3118 struct hnae3_handle
*rhandle
= &hdev
->vport
[0].roce
;
3119 struct hnae3_client
*rclient
= hdev
->roce_client
;
3121 if (rclient
&& rclient
->ops
->link_status_change
)
3122 rclient
->ops
->link_status_change(rhandle
,
3126 hclge_push_link_status(hdev
);
3129 clear_bit(HCLGE_STATE_LINK_UPDATING
, &hdev
->state
);
3132 static void hclge_update_speed_advertising(struct hclge_mac
*mac
)
3136 if (hclge_get_speed_bit(mac
->speed
, &speed_ability
))
3139 switch (mac
->module_type
) {
3140 case HNAE3_MODULE_TYPE_FIBRE_LR
:
3141 hclge_convert_setting_lr(speed_ability
, mac
->advertising
);
3143 case HNAE3_MODULE_TYPE_FIBRE_SR
:
3144 case HNAE3_MODULE_TYPE_AOC
:
3145 hclge_convert_setting_sr(speed_ability
, mac
->advertising
);
3147 case HNAE3_MODULE_TYPE_CR
:
3148 hclge_convert_setting_cr(speed_ability
, mac
->advertising
);
3150 case HNAE3_MODULE_TYPE_KR
:
3151 hclge_convert_setting_kr(speed_ability
, mac
->advertising
);
3158 static void hclge_update_fec_advertising(struct hclge_mac
*mac
)
3160 if (mac
->fec_mode
& BIT(HNAE3_FEC_RS
))
3161 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT
,
3163 else if (mac
->fec_mode
& BIT(HNAE3_FEC_LLRS
))
3164 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT
,
3166 else if (mac
->fec_mode
& BIT(HNAE3_FEC_BASER
))
3167 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT
,
3170 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT
,
3174 static void hclge_update_pause_advertising(struct hclge_dev
*hdev
)
3176 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
3179 switch (hdev
->fc_mode_last_time
) {
3180 case HCLGE_FC_RX_PAUSE
:
3184 case HCLGE_FC_TX_PAUSE
:
3198 linkmode_set_pause(mac
->advertising
, tx_en
, rx_en
);
3201 static void hclge_update_advertising(struct hclge_dev
*hdev
)
3203 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
3205 linkmode_zero(mac
->advertising
);
3206 hclge_update_speed_advertising(mac
);
3207 hclge_update_fec_advertising(mac
);
3208 hclge_update_pause_advertising(hdev
);
3211 static void hclge_update_port_capability(struct hclge_dev
*hdev
,
3212 struct hclge_mac
*mac
)
3214 if (hnae3_dev_fec_supported(hdev
))
3215 hclge_convert_setting_fec(mac
);
3217 /* firmware can not identify back plane type, the media type
3218 * read from configuration can help deal it
3220 if (mac
->media_type
== HNAE3_MEDIA_TYPE_BACKPLANE
&&
3221 mac
->module_type
== HNAE3_MODULE_TYPE_UNKNOWN
)
3222 mac
->module_type
= HNAE3_MODULE_TYPE_KR
;
3223 else if (mac
->media_type
== HNAE3_MEDIA_TYPE_COPPER
)
3224 mac
->module_type
= HNAE3_MODULE_TYPE_TP
;
3226 if (mac
->support_autoneg
) {
3227 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT
, mac
->supported
);
3228 linkmode_copy(mac
->advertising
, mac
->supported
);
3230 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT
,
3232 hclge_update_advertising(hdev
);
3236 static int hclge_get_sfp_speed(struct hclge_dev
*hdev
, u32
*speed
)
3238 struct hclge_sfp_info_cmd
*resp
;
3239 struct hclge_desc desc
;
3242 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_GET_SFP_INFO
, true);
3243 resp
= (struct hclge_sfp_info_cmd
*)desc
.data
;
3244 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3245 if (ret
== -EOPNOTSUPP
) {
3246 dev_warn(&hdev
->pdev
->dev
,
3247 "IMP do not support get SFP speed %d\n", ret
);
3250 dev_err(&hdev
->pdev
->dev
, "get sfp speed failed %d\n", ret
);
3254 *speed
= le32_to_cpu(resp
->speed
);
3259 static int hclge_get_sfp_info(struct hclge_dev
*hdev
, struct hclge_mac
*mac
)
3261 struct hclge_sfp_info_cmd
*resp
;
3262 struct hclge_desc desc
;
3265 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_GET_SFP_INFO
, true);
3266 resp
= (struct hclge_sfp_info_cmd
*)desc
.data
;
3268 resp
->query_type
= QUERY_ACTIVE_SPEED
;
3270 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3271 if (ret
== -EOPNOTSUPP
) {
3272 dev_warn(&hdev
->pdev
->dev
,
3273 "IMP does not support get SFP info %d\n", ret
);
3276 dev_err(&hdev
->pdev
->dev
, "get sfp info failed %d\n", ret
);
3280 /* In some case, mac speed get from IMP may be 0, it shouldn't be
3281 * set to mac->speed.
3283 if (!le32_to_cpu(resp
->speed
))
3286 mac
->speed
= le32_to_cpu(resp
->speed
);
3287 /* if resp->speed_ability is 0, it means it's an old version
3288 * firmware, do not update these params
3290 if (resp
->speed_ability
) {
3291 mac
->module_type
= le32_to_cpu(resp
->module_type
);
3292 mac
->speed_ability
= le32_to_cpu(resp
->speed_ability
);
3293 mac
->autoneg
= resp
->autoneg
;
3294 mac
->support_autoneg
= resp
->autoneg_ability
;
3295 mac
->speed_type
= QUERY_ACTIVE_SPEED
;
3296 mac
->lane_num
= resp
->lane_num
;
3297 if (!resp
->active_fec
)
3300 mac
->fec_mode
= BIT(resp
->active_fec
);
3301 mac
->fec_ability
= resp
->fec_ability
;
3303 mac
->speed_type
= QUERY_SFP_SPEED
;
3309 static int hclge_get_phy_link_ksettings(struct hnae3_handle
*handle
,
3310 struct ethtool_link_ksettings
*cmd
)
3312 struct hclge_desc desc
[HCLGE_PHY_LINK_SETTING_BD_NUM
];
3313 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3314 struct hclge_phy_link_ksetting_0_cmd
*req0
;
3315 struct hclge_phy_link_ksetting_1_cmd
*req1
;
3316 u32 supported
, advertising
, lp_advertising
;
3317 struct hclge_dev
*hdev
= vport
->back
;
3320 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_PHY_LINK_KSETTING
,
3322 desc
[0].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
3323 hclge_cmd_setup_basic_desc(&desc
[1], HCLGE_OPC_PHY_LINK_KSETTING
,
3326 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_PHY_LINK_SETTING_BD_NUM
);
3328 dev_err(&hdev
->pdev
->dev
,
3329 "failed to get phy link ksetting, ret = %d.\n", ret
);
3333 req0
= (struct hclge_phy_link_ksetting_0_cmd
*)desc
[0].data
;
3334 cmd
->base
.autoneg
= req0
->autoneg
;
3335 cmd
->base
.speed
= le32_to_cpu(req0
->speed
);
3336 cmd
->base
.duplex
= req0
->duplex
;
3337 cmd
->base
.port
= req0
->port
;
3338 cmd
->base
.transceiver
= req0
->transceiver
;
3339 cmd
->base
.phy_address
= req0
->phy_address
;
3340 cmd
->base
.eth_tp_mdix
= req0
->eth_tp_mdix
;
3341 cmd
->base
.eth_tp_mdix_ctrl
= req0
->eth_tp_mdix_ctrl
;
3342 supported
= le32_to_cpu(req0
->supported
);
3343 advertising
= le32_to_cpu(req0
->advertising
);
3344 lp_advertising
= le32_to_cpu(req0
->lp_advertising
);
3345 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.supported
,
3347 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.advertising
,
3349 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.lp_advertising
,
3352 req1
= (struct hclge_phy_link_ksetting_1_cmd
*)desc
[1].data
;
3353 cmd
->base
.master_slave_cfg
= req1
->master_slave_cfg
;
3354 cmd
->base
.master_slave_state
= req1
->master_slave_state
;
3360 hclge_set_phy_link_ksettings(struct hnae3_handle
*handle
,
3361 const struct ethtool_link_ksettings
*cmd
)
3363 struct hclge_desc desc
[HCLGE_PHY_LINK_SETTING_BD_NUM
];
3364 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3365 struct hclge_phy_link_ksetting_0_cmd
*req0
;
3366 struct hclge_phy_link_ksetting_1_cmd
*req1
;
3367 struct hclge_dev
*hdev
= vport
->back
;
3371 if (cmd
->base
.autoneg
== AUTONEG_DISABLE
&&
3372 ((cmd
->base
.speed
!= SPEED_100
&& cmd
->base
.speed
!= SPEED_10
) ||
3373 (cmd
->base
.duplex
!= DUPLEX_HALF
&&
3374 cmd
->base
.duplex
!= DUPLEX_FULL
)))
3377 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_PHY_LINK_KSETTING
,
3379 desc
[0].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
3380 hclge_cmd_setup_basic_desc(&desc
[1], HCLGE_OPC_PHY_LINK_KSETTING
,
3383 req0
= (struct hclge_phy_link_ksetting_0_cmd
*)desc
[0].data
;
3384 req0
->autoneg
= cmd
->base
.autoneg
;
3385 req0
->speed
= cpu_to_le32(cmd
->base
.speed
);
3386 req0
->duplex
= cmd
->base
.duplex
;
3387 ethtool_convert_link_mode_to_legacy_u32(&advertising
,
3388 cmd
->link_modes
.advertising
);
3389 req0
->advertising
= cpu_to_le32(advertising
);
3390 req0
->eth_tp_mdix_ctrl
= cmd
->base
.eth_tp_mdix_ctrl
;
3392 req1
= (struct hclge_phy_link_ksetting_1_cmd
*)desc
[1].data
;
3393 req1
->master_slave_cfg
= cmd
->base
.master_slave_cfg
;
3395 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_PHY_LINK_SETTING_BD_NUM
);
3397 dev_err(&hdev
->pdev
->dev
,
3398 "failed to set phy link ksettings, ret = %d.\n", ret
);
3402 hdev
->hw
.mac
.req_autoneg
= cmd
->base
.autoneg
;
3403 hdev
->hw
.mac
.req_speed
= cmd
->base
.speed
;
3404 hdev
->hw
.mac
.req_duplex
= cmd
->base
.duplex
;
3405 linkmode_copy(hdev
->hw
.mac
.advertising
, cmd
->link_modes
.advertising
);
3410 static int hclge_update_tp_port_info(struct hclge_dev
*hdev
)
3412 struct ethtool_link_ksettings cmd
;
3415 if (!hnae3_dev_phy_imp_supported(hdev
))
3418 ret
= hclge_get_phy_link_ksettings(&hdev
->vport
->nic
, &cmd
);
3422 hdev
->hw
.mac
.autoneg
= cmd
.base
.autoneg
;
3423 hdev
->hw
.mac
.speed
= cmd
.base
.speed
;
3424 hdev
->hw
.mac
.duplex
= cmd
.base
.duplex
;
3425 linkmode_copy(hdev
->hw
.mac
.advertising
, cmd
.link_modes
.advertising
);
3430 static int hclge_tp_port_init(struct hclge_dev
*hdev
)
3432 struct ethtool_link_ksettings cmd
;
3434 if (!hnae3_dev_phy_imp_supported(hdev
))
3437 cmd
.base
.autoneg
= hdev
->hw
.mac
.req_autoneg
;
3438 cmd
.base
.speed
= hdev
->hw
.mac
.req_speed
;
3439 cmd
.base
.duplex
= hdev
->hw
.mac
.req_duplex
;
3440 linkmode_copy(cmd
.link_modes
.advertising
, hdev
->hw
.mac
.advertising
);
3442 return hclge_set_phy_link_ksettings(&hdev
->vport
->nic
, &cmd
);
3445 static int hclge_update_port_info(struct hclge_dev
*hdev
)
3447 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
3451 /* get the port info from SFP cmd if not copper port */
3452 if (mac
->media_type
== HNAE3_MEDIA_TYPE_COPPER
)
3453 return hclge_update_tp_port_info(hdev
);
3455 /* if IMP does not support get SFP/qSFP info, return directly */
3456 if (!hdev
->support_sfp_query
)
3459 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
3461 ret
= hclge_get_sfp_info(hdev
, mac
);
3463 speed
= HCLGE_MAC_SPEED_UNKNOWN
;
3464 ret
= hclge_get_sfp_speed(hdev
, &speed
);
3467 if (ret
== -EOPNOTSUPP
) {
3468 hdev
->support_sfp_query
= false;
3474 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
3475 if (mac
->speed_type
== QUERY_ACTIVE_SPEED
) {
3476 hclge_update_port_capability(hdev
, mac
);
3477 if (mac
->speed
!= speed
)
3478 (void)hclge_tm_port_shaper_cfg(hdev
);
3481 return hclge_cfg_mac_speed_dup(hdev
, mac
->speed
,
3482 HCLGE_MAC_FULL
, mac
->lane_num
);
3484 if (speed
== HCLGE_MAC_SPEED_UNKNOWN
)
3485 return 0; /* do nothing if no SFP */
3487 /* must config full duplex for SFP */
3488 return hclge_cfg_mac_speed_dup(hdev
, speed
, HCLGE_MAC_FULL
, 0);
static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
	if (!pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
		return NULL;
	}

	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
			vf, pci_num_vf(hdev->pdev));
		return NULL;
	}

	/* VF start from 1 in vport */
	vf += HCLGE_VF_VPORT_START_NUM;
	return &hdev->vport[vf];
}

3522 static int hclge_get_vf_config(struct hnae3_handle
*handle
, int vf
,
3523 struct ifla_vf_info
*ivf
)
3525 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3526 struct hclge_dev
*hdev
= vport
->back
;
3528 vport
= hclge_get_vf_vport(hdev
, vf
);
3533 ivf
->linkstate
= vport
->vf_info
.link_state
;
3534 ivf
->spoofchk
= vport
->vf_info
.spoofchk
;
3535 ivf
->trusted
= vport
->vf_info
.trusted
;
3536 ivf
->min_tx_rate
= 0;
3537 ivf
->max_tx_rate
= vport
->vf_info
.max_tx_rate
;
3538 ivf
->vlan
= vport
->port_base_vlan_cfg
.vlan_info
.vlan_tag
;
3539 ivf
->vlan_proto
= htons(vport
->port_base_vlan_cfg
.vlan_info
.vlan_proto
);
3540 ivf
->qos
= vport
->port_base_vlan_cfg
.vlan_info
.qos
;
3541 ether_addr_copy(ivf
->mac
, vport
->vf_info
.mac
);
3546 static int hclge_set_vf_link_state(struct hnae3_handle
*handle
, int vf
,
3549 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3550 struct hclge_dev
*hdev
= vport
->back
;
3554 vport
= hclge_get_vf_vport(hdev
, vf
);
3558 link_state_old
= vport
->vf_info
.link_state
;
3559 vport
->vf_info
.link_state
= link_state
;
3561 /* return success directly if the VF is unalive, VF will
3562 * query link state itself when it starts work.
3564 if (!test_bit(HCLGE_VPORT_STATE_ALIVE
, &vport
->state
))
3567 ret
= hclge_push_vf_link_status(vport
);
3569 vport
->vf_info
.link_state
= link_state_old
;
3570 dev_err(&hdev
->pdev
->dev
,
3571 "failed to push vf%d link status, ret = %d\n", vf
, ret
);
3577 static u32
hclge_check_event_cause(struct hclge_dev
*hdev
, u32
*clearval
)
3579 u32 cmdq_src_reg
, msix_src_reg
, hw_err_src_reg
;
3581 /* fetch the events from their corresponding regs */
3582 cmdq_src_reg
= hclge_read_dev(&hdev
->hw
, HCLGE_VECTOR0_CMDQ_SRC_REG
);
3583 msix_src_reg
= hclge_read_dev(&hdev
->hw
, HCLGE_MISC_VECTOR_INT_STS
);
3584 hw_err_src_reg
= hclge_read_dev(&hdev
->hw
,
3585 HCLGE_RAS_PF_OTHER_INT_STS_REG
);
3587 /* Assumption: If by any chance reset and mailbox events are reported
3588 * together then we will only process reset event in this go and will
3589 * defer the processing of the mailbox events. Since, we would have not
3590 * cleared RX CMDQ event this time we would receive again another
3591 * interrupt from H/W just for the mailbox.
3593 * check for vector0 reset event sources
3595 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B
) & msix_src_reg
) {
3596 dev_info(&hdev
->pdev
->dev
, "IMP reset interrupt\n");
3597 set_bit(HNAE3_IMP_RESET
, &hdev
->reset_pending
);
3598 set_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
);
3599 *clearval
= BIT(HCLGE_VECTOR0_IMPRESET_INT_B
);
3600 hdev
->rst_stats
.imp_rst_cnt
++;
3601 return HCLGE_VECTOR0_EVENT_RST
;
3604 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B
) & msix_src_reg
) {
3605 dev_info(&hdev
->pdev
->dev
, "global reset interrupt\n");
3606 set_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
);
3607 set_bit(HNAE3_GLOBAL_RESET
, &hdev
->reset_pending
);
3608 *clearval
= BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B
);
3609 hdev
->rst_stats
.global_rst_cnt
++;
3610 return HCLGE_VECTOR0_EVENT_RST
;
3613 /* check for vector0 msix event and hardware error event source */
3614 if (msix_src_reg
& HCLGE_VECTOR0_REG_MSIX_MASK
||
3615 hw_err_src_reg
& HCLGE_RAS_REG_ERR_MASK
)
3616 return HCLGE_VECTOR0_EVENT_ERR
;
3618 /* check for vector0 ptp event source */
3619 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B
) & msix_src_reg
) {
3620 *clearval
= msix_src_reg
;
3621 return HCLGE_VECTOR0_EVENT_PTP
;
3624 /* check for vector0 mailbox(=CMDQ RX) event source */
3625 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B
) & cmdq_src_reg
) {
3626 cmdq_src_reg
&= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B
);
3627 *clearval
= cmdq_src_reg
;
3628 return HCLGE_VECTOR0_EVENT_MBX
;
3631 /* print other vector0 event source */
3632 dev_info(&hdev
->pdev
->dev
,
3633 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3634 cmdq_src_reg
, hw_err_src_reg
, msix_src_reg
);
3636 return HCLGE_VECTOR0_EVENT_OTHER
;
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
#define HCLGE_IMP_RESET_DELAY		5

	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_PTP:
	case HCLGE_VECTOR0_EVENT_RST:
		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
			mdelay(HCLGE_IMP_RESET_DELAY);

		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

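/* Vector 0 is shared by error, reset, PTP and mailbox events; the handler
 * below processes them in that priority order and re-enables the vector
 * only for events that do not hand control to the reset/error service task.
 */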
3674 static irqreturn_t
hclge_misc_irq_handle(int irq
, void *data
)
3676 struct hclge_dev
*hdev
= data
;
3677 unsigned long flags
;
3681 hclge_enable_vector(&hdev
->misc_vector
, false);
3682 event_cause
= hclge_check_event_cause(hdev
, &clearval
);
3684 /* vector 0 interrupt is shared with reset and mailbox source events. */
3685 switch (event_cause
) {
3686 case HCLGE_VECTOR0_EVENT_ERR
:
3687 hclge_errhand_task_schedule(hdev
);
3689 case HCLGE_VECTOR0_EVENT_RST
:
3690 hclge_reset_task_schedule(hdev
);
3692 case HCLGE_VECTOR0_EVENT_PTP
:
3693 spin_lock_irqsave(&hdev
->ptp
->lock
, flags
);
3694 hclge_ptp_clean_tx_hwts(hdev
);
3695 spin_unlock_irqrestore(&hdev
->ptp
->lock
, flags
);
3697 case HCLGE_VECTOR0_EVENT_MBX
:
3698 /* If we are here then,
3699 * 1. Either we are not handling any mbx task and we are not
3702 * 2. We could be handling a mbx task but nothing more is
3704 * In both cases, we should schedule mbx task as there are more
3705 * mbx messages reported by this interrupt.
3707 hclge_mbx_task_schedule(hdev
);
3710 dev_warn(&hdev
->pdev
->dev
,
3711 "received unknown or unhandled event of vector0\n");
3715 hclge_clear_event_cause(hdev
, event_cause
, clearval
);
3717 /* Enable interrupt if it is not caused by reset event or error event */
3718 if (event_cause
== HCLGE_VECTOR0_EVENT_PTP
||
3719 event_cause
== HCLGE_VECTOR0_EVENT_MBX
||
3720 event_cause
== HCLGE_VECTOR0_EVENT_OTHER
)
3721 hclge_enable_vector(&hdev
->misc_vector
, true);
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGE_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *client = hdev->nic_client;
	int ret;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].roce;
	struct hnae3_client *client = hdev->roce_client;
	int ret;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	350

	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

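/* hclge_set_vf_rst() asks the firmware to set or clear a VF's FUNC_RST_ING
 * flag so the VF can detect an ongoing function reset.
 */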
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

3879 static int hclge_set_all_vf_rst(struct hclge_dev
*hdev
, bool reset
)
3883 for (i
= HCLGE_VF_VPORT_START_NUM
; i
< hdev
->num_alloc_vport
; i
++) {
3884 struct hclge_vport
*vport
= &hdev
->vport
[i
];
3887 /* Send cmd to set/clear VF's FUNC_RST_ING */
3888 ret
= hclge_set_vf_rst(hdev
, vport
->vport_id
, reset
);
3890 dev_err(&hdev
->pdev
->dev
,
3891 "set vf(%u) rst failed %d!\n",
3892 vport
->vport_id
- HCLGE_VF_VPORT_START_NUM
,
3898 !test_bit(HCLGE_VPORT_STATE_INITED
, &vport
->state
))
3901 if (!test_bit(HCLGE_VPORT_STATE_ALIVE
, &vport
->state
) &&
3902 hdev
->reset_type
== HNAE3_FUNC_RESET
) {
3903 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET
,
3904 &vport
->need_notify
);
3908 /* Inform VF to process the reset.
3909 * hclge_inform_reset_assert_to_vf may fail if VF
3910 * driver is not loaded.
3912 ret
= hclge_inform_reset_assert_to_vf(vport
);
3914 dev_warn(&hdev
->pdev
->dev
,
3915 "inform reset to vf(%u) failed %d!\n",
3916 vport
->vport_id
- HCLGE_VF_VPORT_START_NUM
,
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_mbx_scheduled +
				   HCLGE_MBX_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "mbx service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
			 smp_processor_id());

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
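/* Before asserting a PF reset, poll firmware until all VFs report that
 * they have stopped IO, so in-flight traffic is quiesced first.
 */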
static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_sync_cmd *req;
	struct hclge_desc desc;
	int cnt = 0;
	int ret;

	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);

	do {
		/* vf need to down netdev by mbx during PF or FLR reset */
		hclge_mailbox_service_task(hdev);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for VF to stop IO
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return;
		} else if (ret) {
			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
				 ret);
			return;
		} else if (req->all_vf_ready) {
			return;
		}
		msleep(HCLGE_PF_RESET_SYNC_TIME);
		hclge_comm_cmd_reuse_desc(&desc, true);
	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);

	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}
void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type)
{
	struct hnae3_client *client = hdev->nic_client;

	if (!client || !client->ops->process_hw_error ||
	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return;

	client->ops->process_hw_error(&hdev->vport[0].nic, type);
}
static void hclge_handle_imp_error(struct hclge_dev *hdev)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}

	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finish\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		dev_info(&pdev->dev, "IMP reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
		break;
	case HNAE3_GLOBAL_RESET:
		dev_info(&pdev->dev, "global reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
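/* Reset levels are prioritized: IMP > global > function > FLR. Selecting
 * a higher level also clears the lower-level requests it covers.
 */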
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after hardware reset done
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}
static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}

static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_all_vf_rst(hdev, true);
	if (ret)
		return ret;

	hclge_func_reset_sync_vf(hdev);

	return 0;
}
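/* Per-type preparation before waiting for hardware: function reset is
 * asserted via a firmware command, IMP reset re-enables its interrupt
 * source, and in all cases the handshake bit tells hardware that the
 * driver side is ready.
 */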
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_comm_cmd_init is called.
		 */
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;
		break;
	case HNAE3_IMP_RESET:
		hclge_handle_imp_error(hdev);
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}
static void hclge_show_rst_info(struct hclge_dev *hdev)
{
	char *buf;

	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
	if (!buf)
		return;

	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);

	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);

	kfree(buf);
}
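/* Decide whether a failed reset should be retried: a pending or newly
 * signalled reset is re-scheduled, otherwise the failure count is bumped
 * and the reset is abandoned after MAX_RESET_FAIL_CNT attempts.
 */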
static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because new reset interrupt\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status when reset fail */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	hclge_show_rst_info(hdev);

	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	return false;
}
static void hclge_update_reset_level(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_level;

	/* reset request will not be set during reset, so clear
	 * pending reset request to avoid unnecessary reset
	 * caused by the same reason.
	 */
	hclge_get_reset_level(ae_dev, &hdev->reset_request);

	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible. since some errors
	 * need this kind of reset to fix.
	 */
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
}
static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear up the handshake status after re-initialize done */
	hclge_reset_handshake(hdev, false);

	return ret;
}
static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}

static int hclge_reset_prepare(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_reset_prepare_wait(hdev);
}
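/* Rebuild runs after hardware reports reset completion: reinitialize the
 * ae device and stack, then bring the NIC and RoCE clients back up.
 */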
static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_stack(hdev);
	if (ret)
		return ret;

	hclge_clear_reset_cause(hdev);

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	hclge_update_reset_level(hdev);

	return 0;
}
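/* Top-level reset flow: prepare clients and hardware, wait for the
 * hardware reset to finish, then rebuild; any failure falls through to
 * the error handler, which may re-schedule the reset task.
 */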
static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of the 2 cases below:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * check if this is a new reset request and we are not here just because
	 * last reset attempt did not succeed and watchdog hit us again. We will
	 * know this if last reset request did not occur very recently (watchdog
	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
	 * In case of new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we will
	 * not allow it again before 3*HZ times.
	 */

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	}

	if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}
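/* Service-task entry for resets: first finish any reset already pending
 * in hardware, then honor new reset requests from the stack or from
 * error handling.
 */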
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset, then we can proceed with driver and client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling complete */
	hclge_enable_vector(&hdev->misc_vector, true);
}
static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
		hclge_handle_vf_queue_err_ras(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}
static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error
				(hdev, &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	}

	hclge_handle_hw_ras_error(ae_dev);

	hclge_handle_err_reset_request(hdev);
}

static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}
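/* Reset work is serialized by hdev->reset_sem and the RST_HANDLING flag
 * so that only one reset is processed at a time.
 */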
static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_rst_scheduled +
				   HCLGE_RESET_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "reset service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
			 smp_processor_id());

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
#define HCLGE_ALIVE_SECONDS_NORMAL		8

	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
		    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;
		if (time_after(jiffies, vport->last_active_jiffies +
			       alive_time)) {
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
			dev_warn(&hdev->pdev->dev,
				 "VF %u heartbeat timeout\n",
				 i - HCLGE_VF_VPORT_START_NUM);
		}
	}
}
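/* Periodic work: link, MAC, promisc and FD sync run on every pass, while
 * the heavier per-second work (stats, vport alive, VLAN sync) is rate
 * limited via last_serv_processed.
 */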
static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);

	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		return;

	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
	hclge_update_link_status(hdev);
	hclge_sync_mac_table(hdev);
	hclge_sync_promisc_mode(hdev);
	hclge_sync_fd_table(hdev);

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	hclge_update_vport_alive(hdev);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
		hclge_update_stats_for_all(hdev);

	hclge_update_port_info(hdev);
	hclge_sync_vlan_filter(hdev);

	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
		hclge_rfs_filter_expire(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclge_task_schedule(hdev, delta);
}
static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent concurrence with the irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}
static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_ptp_service_task(hdev);
	hclge_mailbox_service_task(hdev);
	hclge_periodic_service_task(hdev);

	/* Handle error recovery, reset and mbx again in case periodical task
	 * delays the handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_mailbox_service_task(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
4726 static void hclge_get_vector_info(struct hclge_dev
*hdev
, u16 idx
,
4727 struct hnae3_vector_info
*vector_info
)
4729 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4731 vector_info
->vector
= pci_irq_vector(hdev
->pdev
, idx
);
4733 /* need an extend offset to config vector >= 64 */
4734 if (idx
- 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2
)
4735 vector_info
->io_addr
= hdev
->hw
.hw
.io_base
+
4736 HCLGE_VECTOR_REG_BASE
+
4737 (idx
- 1) * HCLGE_VECTOR_REG_OFFSET
;
4739 vector_info
->io_addr
= hdev
->hw
.hw
.io_base
+
4740 HCLGE_VECTOR_EXT_REG_BASE
+
4741 (idx
- 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2
*
4742 HCLGE_VECTOR_REG_OFFSET_H
+
4743 (idx
- 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2
*
4744 HCLGE_VECTOR_REG_OFFSET
;
4746 hdev
->vector_status
[idx
] = hdev
->vport
[0].vport_id
;
4747 hdev
->vector_irq
[idx
] = vector_info
->vector
;
4750 static int hclge_get_vector(struct hnae3_handle
*handle
, u16 vector_num
,
4751 struct hnae3_vector_info
*vector_info
)
4753 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4754 struct hnae3_vector_info
*vector
= vector_info
;
4755 struct hclge_dev
*hdev
= vport
->back
;
4760 vector_num
= min_t(u16
, hdev
->num_nic_msi
- 1, vector_num
);
4761 vector_num
= min(hdev
->num_msi_left
, vector_num
);
4763 for (j
= 0; j
< vector_num
; j
++) {
4764 while (++i
< hdev
->num_nic_msi
) {
4765 if (hdev
->vector_status
[i
] == HCLGE_INVALID_VPORT
) {
4766 hclge_get_vector_info(hdev
, i
, vector
);
4774 hdev
->num_msi_left
-= alloc
;
4775 hdev
->num_msi_used
+= alloc
;
4780 static int hclge_get_vector_index(struct hclge_dev
*hdev
, int vector
)
4784 for (i
= 0; i
< hdev
->num_msi
; i
++)
4785 if (vector
== hdev
->vector_irq
[i
])
4791 static int hclge_put_vector(struct hnae3_handle
*handle
, int vector
)
4793 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4794 struct hclge_dev
*hdev
= vport
->back
;
4797 vector_id
= hclge_get_vector_index(hdev
, vector
);
4798 if (vector_id
< 0) {
4799 dev_err(&hdev
->pdev
->dev
,
4800 "Get vector index fail. vector = %d\n", vector
);
4804 hclge_free_vector(hdev
, vector_id
);
4809 static int hclge_get_rss(struct hnae3_handle
*handle
, u32
*indir
,
4812 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(handle
->pdev
);
4813 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4814 struct hclge_comm_rss_cfg
*rss_cfg
= &vport
->back
->rss_cfg
;
4816 hclge_comm_get_rss_hash_info(rss_cfg
, key
, hfunc
);
4818 hclge_comm_get_rss_indir_tbl(rss_cfg
, indir
,
4819 ae_dev
->dev_specs
.rss_ind_tbl_size
);
4824 static int hclge_set_rss(struct hnae3_handle
*handle
, const u32
*indir
,
4825 const u8
*key
, const u8 hfunc
)
4827 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(handle
->pdev
);
4828 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4829 struct hclge_dev
*hdev
= vport
->back
;
4830 struct hclge_comm_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
4833 ret
= hclge_comm_set_rss_hash_key(rss_cfg
, &hdev
->hw
.hw
, key
, hfunc
);
4835 dev_err(&hdev
->pdev
->dev
, "invalid hfunc type %u\n", hfunc
);
4839 /* Update the shadow RSS table with user specified qids */
4840 for (i
= 0; i
< ae_dev
->dev_specs
.rss_ind_tbl_size
; i
++)
4841 rss_cfg
->rss_indirection_tbl
[i
] = indir
[i
];
4843 /* Update the hardware */
4844 return hclge_comm_set_rss_indir_table(ae_dev
, &hdev
->hw
.hw
,
4845 rss_cfg
->rss_indirection_tbl
);
4848 static int hclge_set_rss_tuple(struct hnae3_handle
*handle
,
4849 struct ethtool_rxnfc
*nfc
)
4851 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4852 struct hclge_dev
*hdev
= vport
->back
;
4855 ret
= hclge_comm_set_rss_tuple(hdev
->ae_dev
, &hdev
->hw
.hw
,
4856 &hdev
->rss_cfg
, nfc
);
4858 dev_err(&hdev
->pdev
->dev
,
4859 "failed to set rss tuple, ret = %d.\n", ret
);
4866 static int hclge_get_rss_tuple(struct hnae3_handle
*handle
,
4867 struct ethtool_rxnfc
*nfc
)
4869 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4875 ret
= hclge_comm_get_rss_tuple(&vport
->back
->rss_cfg
, nfc
->flow_type
,
4877 if (ret
|| !tuple_sets
)
4880 nfc
->data
= hclge_comm_convert_rss_tuple(tuple_sets
);
4885 static int hclge_get_tc_size(struct hnae3_handle
*handle
)
4887 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4888 struct hclge_dev
*hdev
= vport
->back
;
4890 return hdev
->pf_rss_size_max
;
4893 static int hclge_init_rss_tc_mode(struct hclge_dev
*hdev
)
4895 struct hnae3_ae_dev
*ae_dev
= hdev
->ae_dev
;
4896 struct hclge_vport
*vport
= hdev
->vport
;
4897 u16 tc_offset
[HCLGE_MAX_TC_NUM
] = {0};
4898 u16 tc_valid
[HCLGE_MAX_TC_NUM
] = {0};
4899 u16 tc_size
[HCLGE_MAX_TC_NUM
] = {0};
4900 struct hnae3_tc_info
*tc_info
;
4905 tc_info
= &vport
->nic
.kinfo
.tc_info
;
4906 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
4907 rss_size
= tc_info
->tqp_count
[i
];
4910 if (!(hdev
->hw_tc_map
& BIT(i
)))
4913 /* tc_size set to hardware is the log2 of roundup power of two
4914 * of rss_size, the acutal queue size is limited by indirection
4917 if (rss_size
> ae_dev
->dev_specs
.rss_ind_tbl_size
||
4919 dev_err(&hdev
->pdev
->dev
,
4920 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4925 roundup_size
= roundup_pow_of_two(rss_size
);
4926 roundup_size
= ilog2(roundup_size
);
4929 tc_size
[i
] = roundup_size
;
4930 tc_offset
[i
] = tc_info
->tqp_offset
[i
];
4933 return hclge_comm_set_rss_tc_mode(&hdev
->hw
.hw
, tc_offset
, tc_valid
,
4937 int hclge_rss_init_hw(struct hclge_dev
*hdev
)
4939 u16
*rss_indir
= hdev
->rss_cfg
.rss_indirection_tbl
;
4940 u8
*key
= hdev
->rss_cfg
.rss_hash_key
;
4941 u8 hfunc
= hdev
->rss_cfg
.rss_algo
;
4944 ret
= hclge_comm_set_rss_indir_table(hdev
->ae_dev
, &hdev
->hw
.hw
,
4949 ret
= hclge_comm_set_rss_algo_key(&hdev
->hw
.hw
, hfunc
, key
);
4953 ret
= hclge_comm_set_rss_input_tuple(&hdev
->hw
.hw
, &hdev
->rss_cfg
);
4957 return hclge_init_rss_tc_mode(hdev
);
4960 int hclge_bind_ring_with_vector(struct hclge_vport
*vport
,
4961 int vector_id
, bool en
,
4962 struct hnae3_ring_chain_node
*ring_chain
)
4964 struct hclge_dev
*hdev
= vport
->back
;
4965 struct hnae3_ring_chain_node
*node
;
4966 struct hclge_desc desc
;
4967 struct hclge_ctrl_vector_chain_cmd
*req
=
4968 (struct hclge_ctrl_vector_chain_cmd
*)desc
.data
;
4969 enum hclge_comm_cmd_status status
;
4970 enum hclge_opcode_type op
;
4971 u16 tqp_type_and_id
;
4974 op
= en
? HCLGE_OPC_ADD_RING_TO_VECTOR
: HCLGE_OPC_DEL_RING_TO_VECTOR
;
4975 hclge_cmd_setup_basic_desc(&desc
, op
, false);
4976 req
->int_vector_id_l
= hnae3_get_field(vector_id
,
4977 HCLGE_VECTOR_ID_L_M
,
4978 HCLGE_VECTOR_ID_L_S
);
4979 req
->int_vector_id_h
= hnae3_get_field(vector_id
,
4980 HCLGE_VECTOR_ID_H_M
,
4981 HCLGE_VECTOR_ID_H_S
);
4984 for (node
= ring_chain
; node
; node
= node
->next
) {
4985 tqp_type_and_id
= le16_to_cpu(req
->tqp_type_and_id
[i
]);
4986 hnae3_set_field(tqp_type_and_id
, HCLGE_INT_TYPE_M
,
4988 hnae3_get_bit(node
->flag
, HNAE3_RING_TYPE_B
));
4989 hnae3_set_field(tqp_type_and_id
, HCLGE_TQP_ID_M
,
4990 HCLGE_TQP_ID_S
, node
->tqp_index
);
4991 hnae3_set_field(tqp_type_and_id
, HCLGE_INT_GL_IDX_M
,
4993 hnae3_get_field(node
->int_gl_idx
,
4994 HNAE3_RING_GL_IDX_M
,
4995 HNAE3_RING_GL_IDX_S
));
4996 req
->tqp_type_and_id
[i
] = cpu_to_le16(tqp_type_and_id
);
4997 if (++i
>= HCLGE_VECTOR_ELEMENTS_PER_CMD
) {
4998 req
->int_cause_num
= HCLGE_VECTOR_ELEMENTS_PER_CMD
;
4999 req
->vfid
= vport
->vport_id
;
5001 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5003 dev_err(&hdev
->pdev
->dev
,
5004 "Map TQP fail, status is %d.\n",
5010 hclge_cmd_setup_basic_desc(&desc
,
5013 req
->int_vector_id_l
=
5014 hnae3_get_field(vector_id
,
5015 HCLGE_VECTOR_ID_L_M
,
5016 HCLGE_VECTOR_ID_L_S
);
5017 req
->int_vector_id_h
=
5018 hnae3_get_field(vector_id
,
5019 HCLGE_VECTOR_ID_H_M
,
5020 HCLGE_VECTOR_ID_H_S
);
5025 req
->int_cause_num
= i
;
5026 req
->vfid
= vport
->vport_id
;
5027 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5029 dev_err(&hdev
->pdev
->dev
,
5030 "Map TQP fail, status is %d.\n", status
);
5038 static int hclge_map_ring_to_vector(struct hnae3_handle
*handle
, int vector
,
5039 struct hnae3_ring_chain_node
*ring_chain
)
5041 struct hclge_vport
*vport
= hclge_get_vport(handle
);
5042 struct hclge_dev
*hdev
= vport
->back
;
5045 vector_id
= hclge_get_vector_index(hdev
, vector
);
5046 if (vector_id
< 0) {
5047 dev_err(&hdev
->pdev
->dev
,
5048 "failed to get vector index. vector=%d\n", vector
);
5052 return hclge_bind_ring_with_vector(vport
, vector_id
, true, ring_chain
);
5055 static int hclge_unmap_ring_frm_vector(struct hnae3_handle
*handle
, int vector
,
5056 struct hnae3_ring_chain_node
*ring_chain
)
5058 struct hclge_vport
*vport
= hclge_get_vport(handle
);
5059 struct hclge_dev
*hdev
= vport
->back
;
5062 if (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
))
5065 vector_id
= hclge_get_vector_index(hdev
, vector
);
5066 if (vector_id
< 0) {
5067 dev_err(&handle
->pdev
->dev
,
5068 "Get vector index fail. ret =%d\n", vector_id
);
5072 ret
= hclge_bind_ring_with_vector(vport
, vector_id
, false, ring_chain
);
5074 dev_err(&handle
->pdev
->dev
,
5075 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5081 static int hclge_cmd_set_promisc_mode(struct hclge_dev
*hdev
, u8 vf_id
,
5082 bool en_uc
, bool en_mc
, bool en_bc
)
5084 struct hclge_vport
*vport
= &hdev
->vport
[vf_id
];
5085 struct hnae3_handle
*handle
= &vport
->nic
;
5086 struct hclge_promisc_cfg_cmd
*req
;
5087 struct hclge_desc desc
;
5088 bool uc_tx_en
= en_uc
;
5092 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CFG_PROMISC_MODE
, false);
5094 req
= (struct hclge_promisc_cfg_cmd
*)desc
.data
;
5097 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC
, &handle
->priv_flags
))
5100 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_UC_RX_EN
, en_uc
? 1 : 0);
5101 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_MC_RX_EN
, en_mc
? 1 : 0);
5102 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_BC_RX_EN
, en_bc
? 1 : 0);
5103 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_UC_TX_EN
, uc_tx_en
? 1 : 0);
5104 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_MC_TX_EN
, en_mc
? 1 : 0);
5105 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_BC_TX_EN
, en_bc
? 1 : 0);
5106 req
->extend_promisc
= promisc_cfg
;
5108 /* to be compatible with DEVICE_VERSION_V1/2 */
5110 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_EN_UC
, en_uc
? 1 : 0);
5111 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_EN_MC
, en_mc
? 1 : 0);
5112 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_EN_BC
, en_bc
? 1 : 0);
5113 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_TX_EN
, 1);
5114 hnae3_set_bit(promisc_cfg
, HCLGE_PROMISC_RX_EN
, 1);
5115 req
->promisc
= promisc_cfg
;
5117 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5119 dev_err(&hdev
->pdev
->dev
,
5120 "failed to set vport %u promisc mode, ret = %d.\n",
5126 int hclge_set_vport_promisc_mode(struct hclge_vport
*vport
, bool en_uc_pmc
,
5127 bool en_mc_pmc
, bool en_bc_pmc
)
5129 return hclge_cmd_set_promisc_mode(vport
->back
, vport
->vport_id
,
5130 en_uc_pmc
, en_mc_pmc
, en_bc_pmc
);
5133 static int hclge_set_promisc_mode(struct hnae3_handle
*handle
, bool en_uc_pmc
,
5136 struct hclge_vport
*vport
= hclge_get_vport(handle
);
5137 struct hclge_dev
*hdev
= vport
->back
;
5138 bool en_bc_pmc
= true;
5140 /* For device whose version below V2, if broadcast promisc enabled,
5141 * vlan filter is always bypassed. So broadcast promisc should be
5142 * disabled until user enable promisc mode
5144 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
5145 en_bc_pmc
= handle
->netdev_flags
& HNAE3_BPE
? true : false;
5147 return hclge_set_vport_promisc_mode(vport
, en_uc_pmc
, en_mc_pmc
,
5151 static void hclge_request_update_promisc_mode(struct hnae3_handle
*handle
)
5153 struct hclge_vport
*vport
= hclge_get_vport(handle
);
5155 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE
, &vport
->state
);
5158 static void hclge_sync_fd_state(struct hclge_dev
*hdev
)
5160 if (hlist_empty(&hdev
->fd_rule_list
))
5161 hdev
->fd_active_type
= HCLGE_FD_RULE_NONE
;
5164 static void hclge_fd_inc_rule_cnt(struct hclge_dev
*hdev
, u16 location
)
5166 if (!test_bit(location
, hdev
->fd_bmap
)) {
5167 set_bit(location
, hdev
->fd_bmap
);
5168 hdev
->hclge_fd_rule_num
++;
5172 static void hclge_fd_dec_rule_cnt(struct hclge_dev
*hdev
, u16 location
)
5174 if (test_bit(location
, hdev
->fd_bmap
)) {
5175 clear_bit(location
, hdev
->fd_bmap
);
5176 hdev
->hclge_fd_rule_num
--;
5180 static void hclge_fd_free_node(struct hclge_dev
*hdev
,
5181 struct hclge_fd_rule
*rule
)
5183 hlist_del(&rule
->rule_node
);
5185 hclge_sync_fd_state(hdev
);
5188 static void hclge_update_fd_rule_node(struct hclge_dev
*hdev
,
5189 struct hclge_fd_rule
*old_rule
,
5190 struct hclge_fd_rule
*new_rule
,
5191 enum HCLGE_FD_NODE_STATE state
)
5194 case HCLGE_FD_TO_ADD
:
5195 case HCLGE_FD_ACTIVE
:
5196 /* 1) if the new state is TO_ADD, just replace the old rule
5197 * with the same location, no matter its state, because the
5198 * new rule will be configured to the hardware.
5199 * 2) if the new state is ACTIVE, it means the new rule
5200 * has been configured to the hardware, so just replace
5201 * the old rule node with the same location.
5202 * 3) for it doesn't add a new node to the list, so it's
5203 * unnecessary to update the rule number and fd_bmap.
5205 new_rule
->rule_node
.next
= old_rule
->rule_node
.next
;
5206 new_rule
->rule_node
.pprev
= old_rule
->rule_node
.pprev
;
5207 memcpy(old_rule
, new_rule
, sizeof(*old_rule
));
5210 case HCLGE_FD_DELETED
:
5211 hclge_fd_dec_rule_cnt(hdev
, old_rule
->location
);
5212 hclge_fd_free_node(hdev
, old_rule
);
5214 case HCLGE_FD_TO_DEL
:
5215 /* if new request is TO_DEL, and old rule is existent
5216 * 1) the state of old rule is TO_DEL, we need do nothing,
5217 * because we delete rule by location, other rule content
5219 * 2) the state of old rule is ACTIVE, we need to change its
5220 * state to TO_DEL, so the rule will be deleted when periodic
5221 * task being scheduled.
5222 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5223 * been added to hardware, so we just delete the rule node from
5224 * fd_rule_list directly.
5226 if (old_rule
->state
== HCLGE_FD_TO_ADD
) {
5227 hclge_fd_dec_rule_cnt(hdev
, old_rule
->location
);
5228 hclge_fd_free_node(hdev
, old_rule
);
5231 old_rule
->state
= HCLGE_FD_TO_DEL
;
5236 static struct hclge_fd_rule
*hclge_find_fd_rule(struct hlist_head
*hlist
,
5238 struct hclge_fd_rule
**parent
)
5240 struct hclge_fd_rule
*rule
;
5241 struct hlist_node
*node
;
5243 hlist_for_each_entry_safe(rule
, node
, hlist
, rule_node
) {
5244 if (rule
->location
== location
)
5246 else if (rule
->location
> location
)
5248 /* record the parent node, use to keep the nodes in fd_rule_list
5257 /* insert fd rule node in ascend order according to rule->location */
5258 static void hclge_fd_insert_rule_node(struct hlist_head
*hlist
,
5259 struct hclge_fd_rule
*rule
,
5260 struct hclge_fd_rule
*parent
)
5262 INIT_HLIST_NODE(&rule
->rule_node
);
5265 hlist_add_behind(&rule
->rule_node
, &parent
->rule_node
);
5267 hlist_add_head(&rule
->rule_node
, hlist
);
5270 static int hclge_fd_set_user_def_cmd(struct hclge_dev
*hdev
,
5271 struct hclge_fd_user_def_cfg
*cfg
)
5273 struct hclge_fd_user_def_cfg_cmd
*req
;
5274 struct hclge_desc desc
;
5278 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_FD_USER_DEF_OP
, false);
5280 req
= (struct hclge_fd_user_def_cfg_cmd
*)desc
.data
;
5282 hnae3_set_bit(data
, HCLGE_FD_USER_DEF_EN_B
, cfg
[0].ref_cnt
> 0);
5283 hnae3_set_field(data
, HCLGE_FD_USER_DEF_OFT_M
,
5284 HCLGE_FD_USER_DEF_OFT_S
, cfg
[0].offset
);
5285 req
->ol2_cfg
= cpu_to_le16(data
);
5288 hnae3_set_bit(data
, HCLGE_FD_USER_DEF_EN_B
, cfg
[1].ref_cnt
> 0);
5289 hnae3_set_field(data
, HCLGE_FD_USER_DEF_OFT_M
,
5290 HCLGE_FD_USER_DEF_OFT_S
, cfg
[1].offset
);
5291 req
->ol3_cfg
= cpu_to_le16(data
);
5294 hnae3_set_bit(data
, HCLGE_FD_USER_DEF_EN_B
, cfg
[2].ref_cnt
> 0);
5295 hnae3_set_field(data
, HCLGE_FD_USER_DEF_OFT_M
,
5296 HCLGE_FD_USER_DEF_OFT_S
, cfg
[2].offset
);
5297 req
->ol4_cfg
= cpu_to_le16(data
);
5299 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5301 dev_err(&hdev
->pdev
->dev
,
5302 "failed to set fd user def data, ret= %d\n", ret
);
5306 static void hclge_sync_fd_user_def_cfg(struct hclge_dev
*hdev
, bool locked
)
5310 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED
, &hdev
->state
))
5314 spin_lock_bh(&hdev
->fd_rule_lock
);
5316 ret
= hclge_fd_set_user_def_cmd(hdev
, hdev
->fd_cfg
.user_def_cfg
);
5318 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED
, &hdev
->state
);
5321 spin_unlock_bh(&hdev
->fd_rule_lock
);
5324 static int hclge_fd_check_user_def_refcnt(struct hclge_dev
*hdev
,
5325 struct hclge_fd_rule
*rule
)
5327 struct hlist_head
*hlist
= &hdev
->fd_rule_list
;
5328 struct hclge_fd_rule
*fd_rule
, *parent
= NULL
;
5329 struct hclge_fd_user_def_info
*info
, *old_info
;
5330 struct hclge_fd_user_def_cfg
*cfg
;
5332 if (!rule
|| rule
->rule_type
!= HCLGE_FD_EP_ACTIVE
||
5333 rule
->ep
.user_def
.layer
== HCLGE_FD_USER_DEF_NONE
)
5336 /* for valid layer is start from 1, so need minus 1 to get the cfg */
5337 cfg
= &hdev
->fd_cfg
.user_def_cfg
[rule
->ep
.user_def
.layer
- 1];
5338 info
= &rule
->ep
.user_def
;
5340 if (!cfg
->ref_cnt
|| cfg
->offset
== info
->offset
)
5343 if (cfg
->ref_cnt
> 1)
5346 fd_rule
= hclge_find_fd_rule(hlist
, rule
->location
, &parent
);
5348 old_info
= &fd_rule
->ep
.user_def
;
5349 if (info
->layer
== old_info
->layer
)
5354 dev_err(&hdev
->pdev
->dev
,
5355 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5360 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev
*hdev
,
5361 struct hclge_fd_rule
*rule
)
5363 struct hclge_fd_user_def_cfg
*cfg
;
5365 if (!rule
|| rule
->rule_type
!= HCLGE_FD_EP_ACTIVE
||
5366 rule
->ep
.user_def
.layer
== HCLGE_FD_USER_DEF_NONE
)
5369 cfg
= &hdev
->fd_cfg
.user_def_cfg
[rule
->ep
.user_def
.layer
- 1];
5370 if (!cfg
->ref_cnt
) {
5371 cfg
->offset
= rule
->ep
.user_def
.offset
;
5372 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED
, &hdev
->state
);
5377 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev
*hdev
,
5378 struct hclge_fd_rule
*rule
)
5380 struct hclge_fd_user_def_cfg
*cfg
;
5382 if (!rule
|| rule
->rule_type
!= HCLGE_FD_EP_ACTIVE
||
5383 rule
->ep
.user_def
.layer
== HCLGE_FD_USER_DEF_NONE
)
5386 cfg
= &hdev
->fd_cfg
.user_def_cfg
[rule
->ep
.user_def
.layer
- 1];
5391 if (!cfg
->ref_cnt
) {
5393 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED
, &hdev
->state
);
5397 static void hclge_update_fd_list(struct hclge_dev
*hdev
,
5398 enum HCLGE_FD_NODE_STATE state
, u16 location
,
5399 struct hclge_fd_rule
*new_rule
)
5401 struct hlist_head
*hlist
= &hdev
->fd_rule_list
;
5402 struct hclge_fd_rule
*fd_rule
, *parent
= NULL
;
5404 fd_rule
= hclge_find_fd_rule(hlist
, location
, &parent
);
5406 hclge_fd_dec_user_def_refcnt(hdev
, fd_rule
);
5407 if (state
== HCLGE_FD_ACTIVE
)
5408 hclge_fd_inc_user_def_refcnt(hdev
, new_rule
);
5409 hclge_sync_fd_user_def_cfg(hdev
, true);
5411 hclge_update_fd_rule_node(hdev
, fd_rule
, new_rule
, state
);
5415 /* it's unlikely to fail here, because we have checked the rule
5418 if (unlikely(state
== HCLGE_FD_TO_DEL
|| state
== HCLGE_FD_DELETED
)) {
5419 dev_warn(&hdev
->pdev
->dev
,
5420 "failed to delete fd rule %u, it's inexistent\n",
5425 hclge_fd_inc_user_def_refcnt(hdev
, new_rule
);
5426 hclge_sync_fd_user_def_cfg(hdev
, true);
5428 hclge_fd_insert_rule_node(hlist
, new_rule
, parent
);
5429 hclge_fd_inc_rule_cnt(hdev
, new_rule
->location
);
5431 if (state
== HCLGE_FD_TO_ADD
) {
5432 set_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
);
5433 hclge_task_schedule(hdev
, 0);
5437 static int hclge_get_fd_mode(struct hclge_dev
*hdev
, u8
*fd_mode
)
5439 struct hclge_get_fd_mode_cmd
*req
;
5440 struct hclge_desc desc
;
5443 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_FD_MODE_CTRL
, true);
5445 req
= (struct hclge_get_fd_mode_cmd
*)desc
.data
;
5447 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5449 dev_err(&hdev
->pdev
->dev
, "get fd mode fail, ret=%d\n", ret
);
5453 *fd_mode
= req
->mode
;
5458 static int hclge_get_fd_allocation(struct hclge_dev
*hdev
,
5459 u32
*stage1_entry_num
,
5460 u32
*stage2_entry_num
,
5461 u16
*stage1_counter_num
,
5462 u16
*stage2_counter_num
)
5464 struct hclge_get_fd_allocation_cmd
*req
;
5465 struct hclge_desc desc
;
5468 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_FD_GET_ALLOCATION
, true);
5470 req
= (struct hclge_get_fd_allocation_cmd
*)desc
.data
;
5472 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5474 dev_err(&hdev
->pdev
->dev
, "query fd allocation fail, ret=%d\n",
5479 *stage1_entry_num
= le32_to_cpu(req
->stage1_entry_num
);
5480 *stage2_entry_num
= le32_to_cpu(req
->stage2_entry_num
);
5481 *stage1_counter_num
= le16_to_cpu(req
->stage1_counter_num
);
5482 *stage2_counter_num
= le16_to_cpu(req
->stage2_counter_num
);
5487 static int hclge_set_fd_key_config(struct hclge_dev
*hdev
,
5488 enum HCLGE_FD_STAGE stage_num
)
5490 struct hclge_set_fd_key_config_cmd
*req
;
5491 struct hclge_fd_key_cfg
*stage
;
5492 struct hclge_desc desc
;
5495 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_FD_KEY_CONFIG
, false);
5497 req
= (struct hclge_set_fd_key_config_cmd
*)desc
.data
;
5498 stage
= &hdev
->fd_cfg
.key_cfg
[stage_num
];
5499 req
->stage
= stage_num
;
5500 req
->key_select
= stage
->key_sel
;
5501 req
->inner_sipv6_word_en
= stage
->inner_sipv6_word_en
;
5502 req
->inner_dipv6_word_en
= stage
->inner_dipv6_word_en
;
5503 req
->outer_sipv6_word_en
= stage
->outer_sipv6_word_en
;
5504 req
->outer_dipv6_word_en
= stage
->outer_dipv6_word_en
;
5505 req
->tuple_mask
= cpu_to_le32(~stage
->tuple_active
);
5506 req
->meta_data_mask
= cpu_to_le32(~stage
->meta_data_active
);
5508 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5510 dev_err(&hdev
->pdev
->dev
, "set fd key fail, ret=%d\n", ret
);
5515 static void hclge_fd_disable_user_def(struct hclge_dev
*hdev
)
5517 struct hclge_fd_user_def_cfg
*cfg
= hdev
->fd_cfg
.user_def_cfg
;
5519 spin_lock_bh(&hdev
->fd_rule_lock
);
5520 memset(cfg
, 0, sizeof(hdev
->fd_cfg
.user_def_cfg
));
5521 spin_unlock_bh(&hdev
->fd_rule_lock
);
5523 hclge_fd_set_user_def_cmd(hdev
, cfg
);
5526 static int hclge_init_fd_config(struct hclge_dev
*hdev
)
5528 #define LOW_2_WORDS 0x03
5529 struct hclge_fd_key_cfg
*key_cfg
;
5532 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
5535 ret
= hclge_get_fd_mode(hdev
, &hdev
->fd_cfg
.fd_mode
);
5539 switch (hdev
->fd_cfg
.fd_mode
) {
5540 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
:
5541 hdev
->fd_cfg
.max_key_length
= MAX_KEY_LENGTH
;
5543 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1
:
5544 hdev
->fd_cfg
.max_key_length
= MAX_KEY_LENGTH
/ 2;
5547 dev_err(&hdev
->pdev
->dev
,
5548 "Unsupported flow director mode %u\n",
5549 hdev
->fd_cfg
.fd_mode
);
5553 key_cfg
= &hdev
->fd_cfg
.key_cfg
[HCLGE_FD_STAGE_1
];
5554 key_cfg
->key_sel
= HCLGE_FD_KEY_BASE_ON_TUPLE
;
5555 key_cfg
->inner_sipv6_word_en
= LOW_2_WORDS
;
5556 key_cfg
->inner_dipv6_word_en
= LOW_2_WORDS
;
5557 key_cfg
->outer_sipv6_word_en
= 0;
5558 key_cfg
->outer_dipv6_word_en
= 0;
5560 key_cfg
->tuple_active
= BIT(INNER_VLAN_TAG_FST
) | BIT(INNER_ETH_TYPE
) |
5561 BIT(INNER_IP_PROTO
) | BIT(INNER_IP_TOS
) |
5562 BIT(INNER_SRC_IP
) | BIT(INNER_DST_IP
) |
5563 BIT(INNER_SRC_PORT
) | BIT(INNER_DST_PORT
);
5565 /* If use max 400bit key, we can support tuples for ether type */
5566 if (hdev
->fd_cfg
.fd_mode
== HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
) {
5567 key_cfg
->tuple_active
|=
5568 BIT(INNER_DST_MAC
) | BIT(INNER_SRC_MAC
);
5569 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V3
)
5570 key_cfg
->tuple_active
|= HCLGE_FD_TUPLE_USER_DEF_TUPLES
;
5573 /* roce_type is used to filter roce frames
5574 * dst_vport is used to specify the rule
5576 key_cfg
->meta_data_active
= BIT(ROCE_TYPE
) | BIT(DST_VPORT
);
5578 ret
= hclge_get_fd_allocation(hdev
,
5579 &hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
],
5580 &hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_2
],
5581 &hdev
->fd_cfg
.cnt_num
[HCLGE_FD_STAGE_1
],
5582 &hdev
->fd_cfg
.cnt_num
[HCLGE_FD_STAGE_2
]);
5586 return hclge_set_fd_key_config(hdev
, HCLGE_FD_STAGE_1
);
5589 static int hclge_fd_tcam_config(struct hclge_dev
*hdev
, u8 stage
, bool sel_x
,
5590 int loc
, u8
*key
, bool is_add
)
5592 struct hclge_fd_tcam_config_1_cmd
*req1
;
5593 struct hclge_fd_tcam_config_2_cmd
*req2
;
5594 struct hclge_fd_tcam_config_3_cmd
*req3
;
5595 struct hclge_desc desc
[3];
5598 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_FD_TCAM_OP
, false);
5599 desc
[0].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
5600 hclge_cmd_setup_basic_desc(&desc
[1], HCLGE_OPC_FD_TCAM_OP
, false);
5601 desc
[1].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
5602 hclge_cmd_setup_basic_desc(&desc
[2], HCLGE_OPC_FD_TCAM_OP
, false);
5604 req1
= (struct hclge_fd_tcam_config_1_cmd
*)desc
[0].data
;
5605 req2
= (struct hclge_fd_tcam_config_2_cmd
*)desc
[1].data
;
5606 req3
= (struct hclge_fd_tcam_config_3_cmd
*)desc
[2].data
;
5608 req1
->stage
= stage
;
5609 req1
->xy_sel
= sel_x
? 1 : 0;
5610 hnae3_set_bit(req1
->port_info
, HCLGE_FD_EPORT_SW_EN_B
, 0);
5611 req1
->index
= cpu_to_le32(loc
);
5612 req1
->entry_vld
= sel_x
? is_add
: 0;
5615 memcpy(req1
->tcam_data
, &key
[0], sizeof(req1
->tcam_data
));
5616 memcpy(req2
->tcam_data
, &key
[sizeof(req1
->tcam_data
)],
5617 sizeof(req2
->tcam_data
));
5618 memcpy(req3
->tcam_data
, &key
[sizeof(req1
->tcam_data
) +
5619 sizeof(req2
->tcam_data
)], sizeof(req3
->tcam_data
));
5622 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 3);
5624 dev_err(&hdev
->pdev
->dev
,
5625 "config tcam key fail, ret=%d\n",
5631 static int hclge_fd_ad_config(struct hclge_dev
*hdev
, u8 stage
, int loc
,
5632 struct hclge_fd_ad_data
*action
)
5634 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(hdev
->pdev
);
5635 struct hclge_fd_ad_config_cmd
*req
;
5636 struct hclge_desc desc
;
5640 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_FD_AD_OP
, false);
5642 req
= (struct hclge_fd_ad_config_cmd
*)desc
.data
;
5643 req
->index
= cpu_to_le32(loc
);
5646 hnae3_set_bit(ad_data
, HCLGE_FD_AD_WR_RULE_ID_B
,
5647 action
->write_rule_id_to_bd
);
5648 hnae3_set_field(ad_data
, HCLGE_FD_AD_RULE_ID_M
, HCLGE_FD_AD_RULE_ID_S
,
5650 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B
, ae_dev
->caps
)) {
5651 hnae3_set_bit(ad_data
, HCLGE_FD_AD_TC_OVRD_B
,
5652 action
->override_tc
);
5653 hnae3_set_field(ad_data
, HCLGE_FD_AD_TC_SIZE_M
,
5654 HCLGE_FD_AD_TC_SIZE_S
, (u32
)action
->tc_size
);
5657 hnae3_set_bit(ad_data
, HCLGE_FD_AD_DROP_B
, action
->drop_packet
);
5658 hnae3_set_bit(ad_data
, HCLGE_FD_AD_DIRECT_QID_B
,
5659 action
->forward_to_direct_queue
);
5660 hnae3_set_field(ad_data
, HCLGE_FD_AD_QID_M
, HCLGE_FD_AD_QID_S
,
5662 hnae3_set_bit(ad_data
, HCLGE_FD_AD_USE_COUNTER_B
, action
->use_counter
);
5663 hnae3_set_field(ad_data
, HCLGE_FD_AD_COUNTER_NUM_M
,
5664 HCLGE_FD_AD_COUNTER_NUM_S
, action
->counter_id
);
5665 hnae3_set_bit(ad_data
, HCLGE_FD_AD_NXT_STEP_B
, action
->use_next_stage
);
5666 hnae3_set_field(ad_data
, HCLGE_FD_AD_NXT_KEY_M
, HCLGE_FD_AD_NXT_KEY_S
,
5667 action
->counter_id
);
5669 req
->ad_data
= cpu_to_le64(ad_data
);
5670 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
5672 dev_err(&hdev
->pdev
->dev
, "fd ad config fail, ret=%d\n", ret
);
5677 static bool hclge_fd_convert_tuple(u32 tuple_bit
, u8
*key_x
, u8
*key_y
,
5678 struct hclge_fd_rule
*rule
)
5680 int offset
, moffset
, ip_offset
;
5681 enum HCLGE_FD_KEY_OPT key_opt
;
5682 u16 tmp_x_s
, tmp_y_s
;
5683 u32 tmp_x_l
, tmp_y_l
;
5687 if (rule
->unused_tuple
& BIT(tuple_bit
))
5690 key_opt
= tuple_key_info
[tuple_bit
].key_opt
;
5691 offset
= tuple_key_info
[tuple_bit
].offset
;
5692 moffset
= tuple_key_info
[tuple_bit
].moffset
;
5696 calc_x(*key_x
, p
[offset
], p
[moffset
]);
5697 calc_y(*key_y
, p
[offset
], p
[moffset
]);
5701 calc_x(tmp_x_s
, *(u16
*)(&p
[offset
]), *(u16
*)(&p
[moffset
]));
5702 calc_y(tmp_y_s
, *(u16
*)(&p
[offset
]), *(u16
*)(&p
[moffset
]));
5703 *(__le16
*)key_x
= cpu_to_le16(tmp_x_s
);
5704 *(__le16
*)key_y
= cpu_to_le16(tmp_y_s
);
5708 calc_x(tmp_x_l
, *(u32
*)(&p
[offset
]), *(u32
*)(&p
[moffset
]));
5709 calc_y(tmp_y_l
, *(u32
*)(&p
[offset
]), *(u32
*)(&p
[moffset
]));
5710 *(__le32
*)key_x
= cpu_to_le32(tmp_x_l
);
5711 *(__le32
*)key_y
= cpu_to_le32(tmp_y_l
);
5715 for (i
= 0; i
< ETH_ALEN
; i
++) {
5716 calc_x(key_x
[ETH_ALEN
- 1 - i
], p
[offset
+ i
],
5718 calc_y(key_y
[ETH_ALEN
- 1 - i
], p
[offset
+ i
],
5724 ip_offset
= IPV4_INDEX
* sizeof(u32
);
5725 calc_x(tmp_x_l
, *(u32
*)(&p
[offset
+ ip_offset
]),
5726 *(u32
*)(&p
[moffset
+ ip_offset
]));
5727 calc_y(tmp_y_l
, *(u32
*)(&p
[offset
+ ip_offset
]),
5728 *(u32
*)(&p
[moffset
+ ip_offset
]));
5729 *(__le32
*)key_x
= cpu_to_le32(tmp_x_l
);
5730 *(__le32
*)key_y
= cpu_to_le32(tmp_y_l
);
5738 static u32
hclge_get_port_number(enum HLCGE_PORT_TYPE port_type
, u8 pf_id
,
5739 u8 vf_id
, u8 network_port_id
)
5741 u32 port_number
= 0;
5743 if (port_type
== HOST_PORT
) {
5744 hnae3_set_field(port_number
, HCLGE_PF_ID_M
, HCLGE_PF_ID_S
,
5746 hnae3_set_field(port_number
, HCLGE_VF_ID_M
, HCLGE_VF_ID_S
,
5748 hnae3_set_bit(port_number
, HCLGE_PORT_TYPE_B
, HOST_PORT
);
5750 hnae3_set_field(port_number
, HCLGE_NETWORK_PORT_ID_M
,
5751 HCLGE_NETWORK_PORT_ID_S
, network_port_id
);
5752 hnae3_set_bit(port_number
, HCLGE_PORT_TYPE_B
, NETWORK_PORT
);
5758 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg
*key_cfg
,
5759 __le32
*key_x
, __le32
*key_y
,
5760 struct hclge_fd_rule
*rule
)
5762 u32 tuple_bit
, meta_data
= 0, tmp_x
, tmp_y
, port_number
;
5763 u8 cur_pos
= 0, tuple_size
, shift_bits
;
5766 for (i
= 0; i
< MAX_META_DATA
; i
++) {
5767 tuple_size
= meta_data_key_info
[i
].key_length
;
5768 tuple_bit
= key_cfg
->meta_data_active
& BIT(i
);
5770 switch (tuple_bit
) {
5771 case BIT(ROCE_TYPE
):
5772 hnae3_set_bit(meta_data
, cur_pos
, NIC_PACKET
);
5773 cur_pos
+= tuple_size
;
5775 case BIT(DST_VPORT
):
5776 port_number
= hclge_get_port_number(HOST_PORT
, 0,
5778 hnae3_set_field(meta_data
,
5779 GENMASK(cur_pos
+ tuple_size
, cur_pos
),
5780 cur_pos
, port_number
);
5781 cur_pos
+= tuple_size
;
5788 calc_x(tmp_x
, meta_data
, 0xFFFFFFFF);
5789 calc_y(tmp_y
, meta_data
, 0xFFFFFFFF);
5790 shift_bits
= sizeof(meta_data
) * 8 - cur_pos
;
5792 *key_x
= cpu_to_le32(tmp_x
<< shift_bits
);
5793 *key_y
= cpu_to_le32(tmp_y
<< shift_bits
);
5796 /* A complete key is combined with meta data key and tuple key.
5797 * Meta data key is stored at the MSB region, and tuple key is stored at
5798 * the LSB region, unused bits will be filled 0.
5800 static int hclge_config_key(struct hclge_dev
*hdev
, u8 stage
,
5801 struct hclge_fd_rule
*rule
)
5803 struct hclge_fd_key_cfg
*key_cfg
= &hdev
->fd_cfg
.key_cfg
[stage
];
5804 u8 key_x
[MAX_KEY_BYTES
], key_y
[MAX_KEY_BYTES
];
5805 u8
*cur_key_x
, *cur_key_y
;
5806 u8 meta_data_region
;
5811 memset(key_x
, 0, sizeof(key_x
));
5812 memset(key_y
, 0, sizeof(key_y
));
5816 for (i
= 0; i
< MAX_TUPLE
; i
++) {
5819 tuple_size
= tuple_key_info
[i
].key_length
/ 8;
5820 if (!(key_cfg
->tuple_active
& BIT(i
)))
5823 tuple_valid
= hclge_fd_convert_tuple(i
, cur_key_x
,
5826 cur_key_x
+= tuple_size
;
5827 cur_key_y
+= tuple_size
;
5831 meta_data_region
= hdev
->fd_cfg
.max_key_length
/ 8 -
5832 MAX_META_DATA_LENGTH
/ 8;
5834 hclge_fd_convert_meta_data(key_cfg
,
5835 (__le32
*)(key_x
+ meta_data_region
),
5836 (__le32
*)(key_y
+ meta_data_region
),
5839 ret
= hclge_fd_tcam_config(hdev
, stage
, false, rule
->location
, key_y
,
5842 dev_err(&hdev
->pdev
->dev
,
5843 "fd key_y config fail, loc=%u, ret=%d\n",
5844 rule
->queue_id
, ret
);
5848 ret
= hclge_fd_tcam_config(hdev
, stage
, true, rule
->location
, key_x
,
5851 dev_err(&hdev
->pdev
->dev
,
5852 "fd key_x config fail, loc=%u, ret=%d\n",
5853 rule
->queue_id
, ret
);
5857 static int hclge_config_action(struct hclge_dev
*hdev
, u8 stage
,
5858 struct hclge_fd_rule
*rule
)
5860 struct hclge_vport
*vport
= hdev
->vport
;
5861 struct hnae3_knic_private_info
*kinfo
= &vport
->nic
.kinfo
;
5862 struct hclge_fd_ad_data ad_data
;
5864 memset(&ad_data
, 0, sizeof(struct hclge_fd_ad_data
));
5865 ad_data
.ad_id
= rule
->location
;
5867 if (rule
->action
== HCLGE_FD_ACTION_DROP_PACKET
) {
5868 ad_data
.drop_packet
= true;
5869 } else if (rule
->action
== HCLGE_FD_ACTION_SELECT_TC
) {
5870 ad_data
.override_tc
= true;
5872 kinfo
->tc_info
.tqp_offset
[rule
->cls_flower
.tc
];
5874 ilog2(kinfo
->tc_info
.tqp_count
[rule
->cls_flower
.tc
]);
5876 ad_data
.forward_to_direct_queue
= true;
5877 ad_data
.queue_id
= rule
->queue_id
;
5880 if (hdev
->fd_cfg
.cnt_num
[HCLGE_FD_STAGE_1
]) {
5881 ad_data
.use_counter
= true;
5882 ad_data
.counter_id
= rule
->vf_id
%
5883 hdev
->fd_cfg
.cnt_num
[HCLGE_FD_STAGE_1
];
5885 ad_data
.use_counter
= false;
5886 ad_data
.counter_id
= 0;
5889 ad_data
.use_next_stage
= false;
5890 ad_data
.next_input_key
= 0;
5892 ad_data
.write_rule_id_to_bd
= true;
5893 ad_data
.rule_id
= rule
->location
;
5895 return hclge_fd_ad_config(hdev
, stage
, ad_data
.ad_id
, &ad_data
);
5898 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec
*spec
,
5901 if (!spec
|| !unused_tuple
)
5904 *unused_tuple
|= BIT(INNER_SRC_MAC
) | BIT(INNER_DST_MAC
);
5907 *unused_tuple
|= BIT(INNER_SRC_IP
);
5910 *unused_tuple
|= BIT(INNER_DST_IP
);
5913 *unused_tuple
|= BIT(INNER_SRC_PORT
);
5916 *unused_tuple
|= BIT(INNER_DST_PORT
);
5919 *unused_tuple
|= BIT(INNER_IP_TOS
);
5924 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec
*spec
,
5927 if (!spec
|| !unused_tuple
)
5930 *unused_tuple
|= BIT(INNER_SRC_MAC
) | BIT(INNER_DST_MAC
) |
5931 BIT(INNER_SRC_PORT
) | BIT(INNER_DST_PORT
);
5934 *unused_tuple
|= BIT(INNER_SRC_IP
);
5937 *unused_tuple
|= BIT(INNER_DST_IP
);
5940 *unused_tuple
|= BIT(INNER_IP_TOS
);
5943 *unused_tuple
|= BIT(INNER_IP_PROTO
);
5945 if (spec
->l4_4_bytes
)
5948 if (spec
->ip_ver
!= ETH_RX_NFC_IP4
)
5954 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec
*spec
,
5957 if (!spec
|| !unused_tuple
)
5960 *unused_tuple
|= BIT(INNER_SRC_MAC
) | BIT(INNER_DST_MAC
);
5962 /* check whether src/dst ip address used */
5963 if (ipv6_addr_any((struct in6_addr
*)spec
->ip6src
))
5964 *unused_tuple
|= BIT(INNER_SRC_IP
);
5966 if (ipv6_addr_any((struct in6_addr
*)spec
->ip6dst
))
5967 *unused_tuple
|= BIT(INNER_DST_IP
);
5970 *unused_tuple
|= BIT(INNER_SRC_PORT
);
5973 *unused_tuple
|= BIT(INNER_DST_PORT
);
5976 *unused_tuple
|= BIT(INNER_IP_TOS
);
5981 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec
*spec
,
5984 if (!spec
|| !unused_tuple
)
5987 *unused_tuple
|= BIT(INNER_SRC_MAC
) | BIT(INNER_DST_MAC
) |
5988 BIT(INNER_SRC_PORT
) | BIT(INNER_DST_PORT
);
5990 /* check whether src/dst ip address used */
5991 if (ipv6_addr_any((struct in6_addr
*)spec
->ip6src
))
5992 *unused_tuple
|= BIT(INNER_SRC_IP
);
5994 if (ipv6_addr_any((struct in6_addr
*)spec
->ip6dst
))
5995 *unused_tuple
|= BIT(INNER_DST_IP
);
5997 if (!spec
->l4_proto
)
5998 *unused_tuple
|= BIT(INNER_IP_PROTO
);
6001 *unused_tuple
|= BIT(INNER_IP_TOS
);
6003 if (spec
->l4_4_bytes
)
6009 static int hclge_fd_check_ether_tuple(struct ethhdr
*spec
, u32
*unused_tuple
)
6011 if (!spec
|| !unused_tuple
)
6014 *unused_tuple
|= BIT(INNER_SRC_IP
) | BIT(INNER_DST_IP
) |
6015 BIT(INNER_SRC_PORT
) | BIT(INNER_DST_PORT
) |
6016 BIT(INNER_IP_TOS
) | BIT(INNER_IP_PROTO
);
6018 if (is_zero_ether_addr(spec
->h_source
))
6019 *unused_tuple
|= BIT(INNER_SRC_MAC
);
6021 if (is_zero_ether_addr(spec
->h_dest
))
6022 *unused_tuple
|= BIT(INNER_DST_MAC
);
6025 *unused_tuple
|= BIT(INNER_ETH_TYPE
);
6030 static int hclge_fd_check_ext_tuple(struct hclge_dev
*hdev
,
6031 struct ethtool_rx_flow_spec
*fs
,
6034 if (fs
->flow_type
& FLOW_EXT
) {
6035 if (fs
->h_ext
.vlan_etype
) {
6036 dev_err(&hdev
->pdev
->dev
, "vlan-etype is not supported!\n");
6040 if (!fs
->h_ext
.vlan_tci
)
6041 *unused_tuple
|= BIT(INNER_VLAN_TAG_FST
);
6043 if (fs
->m_ext
.vlan_tci
&&
6044 be16_to_cpu(fs
->h_ext
.vlan_tci
) >= VLAN_N_VID
) {
6045 dev_err(&hdev
->pdev
->dev
,
6046 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6047 ntohs(fs
->h_ext
.vlan_tci
), VLAN_N_VID
- 1);
6051 *unused_tuple
|= BIT(INNER_VLAN_TAG_FST
);
6054 if (fs
->flow_type
& FLOW_MAC_EXT
) {
6055 if (hdev
->fd_cfg
.fd_mode
!=
6056 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
) {
6057 dev_err(&hdev
->pdev
->dev
,
6058 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6062 if (is_zero_ether_addr(fs
->h_ext
.h_dest
))
6063 *unused_tuple
|= BIT(INNER_DST_MAC
);
6065 *unused_tuple
&= ~BIT(INNER_DST_MAC
);
6071 static int hclge_fd_get_user_def_layer(u32 flow_type
, u32
*unused_tuple
,
6072 struct hclge_fd_user_def_info
*info
)
6074 switch (flow_type
) {
6076 info
->layer
= HCLGE_FD_USER_DEF_L2
;
6077 *unused_tuple
&= ~BIT(INNER_L2_RSV
);
6080 case IPV6_USER_FLOW
:
6081 info
->layer
= HCLGE_FD_USER_DEF_L3
;
6082 *unused_tuple
&= ~BIT(INNER_L3_RSV
);
6088 info
->layer
= HCLGE_FD_USER_DEF_L4
;
6089 *unused_tuple
&= ~BIT(INNER_L4_RSV
);
static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
{
	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}
6103 static int hclge_fd_parse_user_def_field(struct hclge_dev
*hdev
,
6104 struct ethtool_rx_flow_spec
*fs
,
6106 struct hclge_fd_user_def_info
*info
)
6108 u32 tuple_active
= hdev
->fd_cfg
.key_cfg
[HCLGE_FD_STAGE_1
].tuple_active
;
6109 u32 flow_type
= fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
);
6110 u16 data
, offset
, data_mask
, offset_mask
;
6113 info
->layer
= HCLGE_FD_USER_DEF_NONE
;
6114 *unused_tuple
|= HCLGE_FD_TUPLE_USER_DEF_TUPLES
;
6116 if (!(fs
->flow_type
& FLOW_EXT
) || hclge_fd_is_user_def_all_masked(fs
))
6119 /* user-def data from ethtool is 64 bit value, the bit0~15 is used
6120 * for data, and bit32~47 is used for offset.
6122 data
= be32_to_cpu(fs
->h_ext
.data
[1]) & HCLGE_FD_USER_DEF_DATA
;
6123 data_mask
= be32_to_cpu(fs
->m_ext
.data
[1]) & HCLGE_FD_USER_DEF_DATA
;
6124 offset
= be32_to_cpu(fs
->h_ext
.data
[0]) & HCLGE_FD_USER_DEF_OFFSET
;
6125 offset_mask
= be32_to_cpu(fs
->m_ext
.data
[0]) & HCLGE_FD_USER_DEF_OFFSET
;
6127 if (!(tuple_active
& HCLGE_FD_TUPLE_USER_DEF_TUPLES
)) {
6128 dev_err(&hdev
->pdev
->dev
, "user-def bytes are not supported\n");
6132 if (offset
> HCLGE_FD_MAX_USER_DEF_OFFSET
) {
6133 dev_err(&hdev
->pdev
->dev
,
6134 "user-def offset[%u] should be no more than %u\n",
6135 offset
, HCLGE_FD_MAX_USER_DEF_OFFSET
);
6139 if (offset_mask
!= HCLGE_FD_USER_DEF_OFFSET_UNMASK
) {
6140 dev_err(&hdev
->pdev
->dev
, "user-def offset can't be masked\n");
6144 ret
= hclge_fd_get_user_def_layer(flow_type
, unused_tuple
, info
);
6146 dev_err(&hdev
->pdev
->dev
,
6147 "unsupported flow type for user-def bytes, ret = %d\n",
6153 info
->data_mask
= data_mask
;
6154 info
->offset
= offset
;
6159 static int hclge_fd_check_spec(struct hclge_dev
*hdev
,
6160 struct ethtool_rx_flow_spec
*fs
,
6162 struct hclge_fd_user_def_info
*info
)
6167 if (fs
->location
>= hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
]) {
6168 dev_err(&hdev
->pdev
->dev
,
6169 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6171 hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
] - 1);
6175 ret
= hclge_fd_parse_user_def_field(hdev
, fs
, unused_tuple
, info
);
6179 flow_type
= fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
);
6180 switch (flow_type
) {
6184 ret
= hclge_fd_check_tcpip4_tuple(&fs
->h_u
.tcp_ip4_spec
,
6188 ret
= hclge_fd_check_ip4_tuple(&fs
->h_u
.usr_ip4_spec
,
6194 ret
= hclge_fd_check_tcpip6_tuple(&fs
->h_u
.tcp_ip6_spec
,
6197 case IPV6_USER_FLOW
:
6198 ret
= hclge_fd_check_ip6_tuple(&fs
->h_u
.usr_ip6_spec
,
6202 if (hdev
->fd_cfg
.fd_mode
!=
6203 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
) {
6204 dev_err(&hdev
->pdev
->dev
,
6205 "ETHER_FLOW is not supported in current fd mode!\n");
6209 ret
= hclge_fd_check_ether_tuple(&fs
->h_u
.ether_spec
,
6213 dev_err(&hdev
->pdev
->dev
,
6214 "unsupported protocol type, protocol type = %#x\n",
6220 dev_err(&hdev
->pdev
->dev
,
6221 "failed to check flow union tuple, ret = %d\n",
6226 return hclge_fd_check_ext_tuple(hdev
, fs
, unused_tuple
);
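/* The hclge_fd_get_xxx_tuple() helpers below convert an ethtool flow spec
 * of one flow type into the hclge_fd_rule tuple/tuple-mask representation,
 * with multi-byte fields stored in host byte order.
 */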
static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}
static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;
}
6281 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec
*fs
,
6282 struct hclge_fd_rule
*rule
, u8 ip_proto
)
6284 ipv6_addr_be32_to_cpu(rule
->tuples
.src_ip
,
6285 fs
->h_u
.tcp_ip6_spec
.ip6src
);
6286 ipv6_addr_be32_to_cpu(rule
->tuples_mask
.src_ip
,
6287 fs
->m_u
.tcp_ip6_spec
.ip6src
);
6289 ipv6_addr_be32_to_cpu(rule
->tuples
.dst_ip
,
6290 fs
->h_u
.tcp_ip6_spec
.ip6dst
);
6291 ipv6_addr_be32_to_cpu(rule
->tuples_mask
.dst_ip
,
6292 fs
->m_u
.tcp_ip6_spec
.ip6dst
);
6294 rule
->tuples
.src_port
= be16_to_cpu(fs
->h_u
.tcp_ip6_spec
.psrc
);
6295 rule
->tuples_mask
.src_port
= be16_to_cpu(fs
->m_u
.tcp_ip6_spec
.psrc
);
6297 rule
->tuples
.dst_port
= be16_to_cpu(fs
->h_u
.tcp_ip6_spec
.pdst
);
6298 rule
->tuples_mask
.dst_port
= be16_to_cpu(fs
->m_u
.tcp_ip6_spec
.pdst
);
6300 rule
->tuples
.ether_proto
= ETH_P_IPV6
;
6301 rule
->tuples_mask
.ether_proto
= 0xFFFF;
6303 rule
->tuples
.ip_tos
= fs
->h_u
.tcp_ip6_spec
.tclass
;
6304 rule
->tuples_mask
.ip_tos
= fs
->m_u
.tcp_ip6_spec
.tclass
;
6306 rule
->tuples
.ip_proto
= ip_proto
;
6307 rule
->tuples_mask
.ip_proto
= 0xFF;
6310 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec
*fs
,
6311 struct hclge_fd_rule
*rule
)
6313 ipv6_addr_be32_to_cpu(rule
->tuples
.src_ip
,
6314 fs
->h_u
.usr_ip6_spec
.ip6src
);
6315 ipv6_addr_be32_to_cpu(rule
->tuples_mask
.src_ip
,
6316 fs
->m_u
.usr_ip6_spec
.ip6src
);
6318 ipv6_addr_be32_to_cpu(rule
->tuples
.dst_ip
,
6319 fs
->h_u
.usr_ip6_spec
.ip6dst
);
6320 ipv6_addr_be32_to_cpu(rule
->tuples_mask
.dst_ip
,
6321 fs
->m_u
.usr_ip6_spec
.ip6dst
);
6323 rule
->tuples
.ip_proto
= fs
->h_u
.usr_ip6_spec
.l4_proto
;
6324 rule
->tuples_mask
.ip_proto
= fs
->m_u
.usr_ip6_spec
.l4_proto
;
6326 rule
->tuples
.ip_tos
= fs
->h_u
.tcp_ip6_spec
.tclass
;
6327 rule
->tuples_mask
.ip_tos
= fs
->m_u
.tcp_ip6_spec
.tclass
;
6329 rule
->tuples
.ether_proto
= ETH_P_IPV6
;
6330 rule
->tuples_mask
.ether_proto
= 0xFFFF;
static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);

	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);

	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
}
static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
					struct hclge_fd_rule *rule)
{
	switch (info->layer) {
	case HCLGE_FD_USER_DEF_L2:
		rule->tuples.l2_user_def = info->data;
		rule->tuples_mask.l2_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L3:
		rule->tuples.l3_user_def = info->data;
		rule->tuples_mask.l3_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L4:
		rule->tuples.l4_user_def = (u32)info->data << 16;
		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
		break;
	default:
		break;
	}

	rule->ep.user_def = *info;
}
6369 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec
*fs
,
6370 struct hclge_fd_rule
*rule
,
6371 struct hclge_fd_user_def_info
*info
)
6373 u32 flow_type
= fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
);
6375 switch (flow_type
) {
6377 hclge_fd_get_tcpip4_tuple(fs
, rule
, IPPROTO_SCTP
);
6380 hclge_fd_get_tcpip4_tuple(fs
, rule
, IPPROTO_TCP
);
6383 hclge_fd_get_tcpip4_tuple(fs
, rule
, IPPROTO_UDP
);
6386 hclge_fd_get_ip4_tuple(fs
, rule
);
6389 hclge_fd_get_tcpip6_tuple(fs
, rule
, IPPROTO_SCTP
);
6392 hclge_fd_get_tcpip6_tuple(fs
, rule
, IPPROTO_TCP
);
6395 hclge_fd_get_tcpip6_tuple(fs
, rule
, IPPROTO_UDP
);
6397 case IPV6_USER_FLOW
:
6398 hclge_fd_get_ip6_tuple(fs
, rule
);
6401 hclge_fd_get_ether_tuple(fs
, rule
);
6407 if (fs
->flow_type
& FLOW_EXT
) {
6408 rule
->tuples
.vlan_tag1
= be16_to_cpu(fs
->h_ext
.vlan_tci
);
6409 rule
->tuples_mask
.vlan_tag1
= be16_to_cpu(fs
->m_ext
.vlan_tci
);
6410 hclge_fd_get_user_def_tuple(info
, rule
);
6413 if (fs
->flow_type
& FLOW_MAC_EXT
) {
6414 ether_addr_copy(rule
->tuples
.dst_mac
, fs
->h_ext
.h_dest
);
6415 ether_addr_copy(rule
->tuples_mask
.dst_mac
, fs
->m_ext
.h_dest
);
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		return ret;

	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
}
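/* Common path for adding a rule from either ethtool or tc flower: reject
 * rules whose type conflicts with the currently active rule type, clear
 * aRFS rules if needed, write the rule to hardware and track it in the
 * software rule list, all under fd_rule_lock.
 */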
6433 static int hclge_add_fd_entry_common(struct hclge_dev
*hdev
,
6434 struct hclge_fd_rule
*rule
)
6438 spin_lock_bh(&hdev
->fd_rule_lock
);
6440 if (hdev
->fd_active_type
!= rule
->rule_type
&&
6441 (hdev
->fd_active_type
== HCLGE_FD_TC_FLOWER_ACTIVE
||
6442 hdev
->fd_active_type
== HCLGE_FD_EP_ACTIVE
)) {
6443 dev_err(&hdev
->pdev
->dev
,
6444 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6445 rule
->rule_type
, hdev
->fd_active_type
);
6446 spin_unlock_bh(&hdev
->fd_rule_lock
);
6450 ret
= hclge_fd_check_user_def_refcnt(hdev
, rule
);
6454 ret
= hclge_clear_arfs_rules(hdev
);
6458 ret
= hclge_fd_config_rule(hdev
, rule
);
6462 rule
->state
= HCLGE_FD_ACTIVE
;
6463 hdev
->fd_active_type
= rule
->rule_type
;
6464 hclge_update_fd_list(hdev
, rule
->state
, rule
->location
, rule
);
6467 spin_unlock_bh(&hdev
->fd_rule_lock
);
static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}
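/* Decode the ethtool ring_cookie into an action: RX_CLS_FLOW_DISC means
 * drop the packet, otherwise the cookie selects a destination VF (0 means
 * the PF itself) and a queue index that is validated against that
 * function's TQP number.
 */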
6479 static int hclge_fd_parse_ring_cookie(struct hclge_dev
*hdev
, u64 ring_cookie
,
6480 u16
*vport_id
, u8
*action
, u16
*queue_id
)
6482 struct hclge_vport
*vport
= hdev
->vport
;
6484 if (ring_cookie
== RX_CLS_FLOW_DISC
) {
6485 *action
= HCLGE_FD_ACTION_DROP_PACKET
;
6487 u32 ring
= ethtool_get_flow_spec_ring(ring_cookie
);
6488 u8 vf
= ethtool_get_flow_spec_ring_vf(ring_cookie
);
6491 /* To keep consistent with user's configuration, minus 1 when
6492 * printing 'vf', because vf id from ethtool is added 1 for vf.
6494 if (vf
> hdev
->num_req_vfs
) {
6495 dev_err(&hdev
->pdev
->dev
,
6496 "Error: vf id (%u) should be less than %u\n",
6497 vf
- 1U, hdev
->num_req_vfs
);
6501 *vport_id
= vf
? hdev
->vport
[vf
].vport_id
: vport
->vport_id
;
6502 tqps
= hdev
->vport
[vf
].nic
.kinfo
.num_tqps
;
6505 dev_err(&hdev
->pdev
->dev
,
6506 "Error: queue id (%u) > max tqp num (%u)\n",
6511 *action
= HCLGE_FD_ACTION_SELECT_QUEUE
;
6518 static int hclge_add_fd_entry(struct hnae3_handle
*handle
,
6519 struct ethtool_rxnfc
*cmd
)
6521 struct hclge_vport
*vport
= hclge_get_vport(handle
);
6522 struct hclge_dev
*hdev
= vport
->back
;
6523 struct hclge_fd_user_def_info info
;
6524 u16 dst_vport_id
= 0, q_index
= 0;
6525 struct ethtool_rx_flow_spec
*fs
;
6526 struct hclge_fd_rule
*rule
;
6531 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
)) {
6532 dev_err(&hdev
->pdev
->dev
,
6533 "flow table director is not supported\n");
6538 dev_err(&hdev
->pdev
->dev
,
6539 "please enable flow director first\n");
6543 fs
= (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
6545 ret
= hclge_fd_check_spec(hdev
, fs
, &unused
, &info
);
6549 ret
= hclge_fd_parse_ring_cookie(hdev
, fs
->ring_cookie
, &dst_vport_id
,
6554 rule
= kzalloc(sizeof(*rule
), GFP_KERNEL
);
6558 ret
= hclge_fd_get_tuple(fs
, rule
, &info
);
6564 rule
->flow_type
= fs
->flow_type
;
6565 rule
->location
= fs
->location
;
6566 rule
->unused_tuple
= unused
;
6567 rule
->vf_id
= dst_vport_id
;
6568 rule
->queue_id
= q_index
;
6569 rule
->action
= action
;
6570 rule
->rule_type
= HCLGE_FD_EP_ACTIVE
;
6572 ret
= hclge_add_fd_entry_common(hdev
, rule
);
6579 static int hclge_del_fd_entry(struct hnae3_handle
*handle
,
6580 struct ethtool_rxnfc
*cmd
)
6582 struct hclge_vport
*vport
= hclge_get_vport(handle
);
6583 struct hclge_dev
*hdev
= vport
->back
;
6584 struct ethtool_rx_flow_spec
*fs
;
6587 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
6590 fs
= (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
6592 if (fs
->location
>= hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
])
6595 spin_lock_bh(&hdev
->fd_rule_lock
);
6596 if (hdev
->fd_active_type
== HCLGE_FD_TC_FLOWER_ACTIVE
||
6597 !test_bit(fs
->location
, hdev
->fd_bmap
)) {
6598 dev_err(&hdev
->pdev
->dev
,
6599 "Delete fail, rule %u is inexistent\n", fs
->location
);
6600 spin_unlock_bh(&hdev
->fd_rule_lock
);
6604 ret
= hclge_fd_tcam_config(hdev
, HCLGE_FD_STAGE_1
, true, fs
->location
,
6609 hclge_update_fd_list(hdev
, HCLGE_FD_DELETED
, fs
->location
, NULL
);
6612 spin_unlock_bh(&hdev
->fd_rule_lock
);
6616 static void hclge_clear_fd_rules_in_list(struct hclge_dev
*hdev
,
6619 struct hclge_fd_rule
*rule
;
6620 struct hlist_node
*node
;
6623 spin_lock_bh(&hdev
->fd_rule_lock
);
6625 for_each_set_bit(location
, hdev
->fd_bmap
,
6626 hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
])
6627 hclge_fd_tcam_config(hdev
, HCLGE_FD_STAGE_1
, true, location
,
6631 hlist_for_each_entry_safe(rule
, node
, &hdev
->fd_rule_list
,
6633 hlist_del(&rule
->rule_node
);
6636 hdev
->fd_active_type
= HCLGE_FD_RULE_NONE
;
6637 hdev
->hclge_fd_rule_num
= 0;
6638 bitmap_zero(hdev
->fd_bmap
,
6639 hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
]);
6642 spin_unlock_bh(&hdev
->fd_rule_lock
);
6645 static void hclge_del_all_fd_entries(struct hclge_dev
*hdev
)
6647 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
6650 hclge_clear_fd_rules_in_list(hdev
, true);
6651 hclge_fd_disable_user_def(hdev
);
6654 static int hclge_restore_fd_entries(struct hnae3_handle
*handle
)
6656 struct hclge_vport
*vport
= hclge_get_vport(handle
);
6657 struct hclge_dev
*hdev
= vport
->back
;
6658 struct hclge_fd_rule
*rule
;
6659 struct hlist_node
*node
;
6661 /* Return ok here, because reset error handling will check this
6662 * return value. If error is returned here, the reset process will
6665 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
6668 /* if fd is disabled, should not restore it when reset */
6672 spin_lock_bh(&hdev
->fd_rule_lock
);
6673 hlist_for_each_entry_safe(rule
, node
, &hdev
->fd_rule_list
, rule_node
) {
6674 if (rule
->state
== HCLGE_FD_ACTIVE
)
6675 rule
->state
= HCLGE_FD_TO_ADD
;
6677 spin_unlock_bh(&hdev
->fd_rule_lock
);
6678 set_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
);
6683 static int hclge_get_fd_rule_cnt(struct hnae3_handle
*handle
,
6684 struct ethtool_rxnfc
*cmd
)
6686 struct hclge_vport
*vport
= hclge_get_vport(handle
);
6687 struct hclge_dev
*hdev
= vport
->back
;
6689 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
) || hclge_is_cls_flower_active(handle
))
6692 cmd
->rule_cnt
= hdev
->hclge_fd_rule_num
;
6693 cmd
->data
= hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
];
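/* The hclge_fd_get_xxx_info() helpers below do the reverse conversion of
 * the tuple parsers above: they fill an ethtool flow spec and its mask
 * from a stored hclge_fd_rule, reporting a zero mask for every tuple that
 * is marked as unused.
 */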
static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;
}
6723 static void hclge_fd_get_ip4_info(struct hclge_fd_rule
*rule
,
6724 struct ethtool_usrip4_spec
*spec
,
6725 struct ethtool_usrip4_spec
*spec_mask
)
6727 spec
->ip4src
= cpu_to_be32(rule
->tuples
.src_ip
[IPV4_INDEX
]);
6728 spec_mask
->ip4src
= rule
->unused_tuple
& BIT(INNER_SRC_IP
) ?
6729 0 : cpu_to_be32(rule
->tuples_mask
.src_ip
[IPV4_INDEX
]);
6731 spec
->ip4dst
= cpu_to_be32(rule
->tuples
.dst_ip
[IPV4_INDEX
]);
6732 spec_mask
->ip4dst
= rule
->unused_tuple
& BIT(INNER_DST_IP
) ?
6733 0 : cpu_to_be32(rule
->tuples_mask
.dst_ip
[IPV4_INDEX
]);
6735 spec
->tos
= rule
->tuples
.ip_tos
;
6736 spec_mask
->tos
= rule
->unused_tuple
& BIT(INNER_IP_TOS
) ?
6737 0 : rule
->tuples_mask
.ip_tos
;
6739 spec
->proto
= rule
->tuples
.ip_proto
;
6740 spec_mask
->proto
= rule
->unused_tuple
& BIT(INNER_IP_PROTO
) ?
6741 0 : rule
->tuples_mask
.ip_proto
;
6743 spec
->ip_ver
= ETH_RX_NFC_IP4
;
6746 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule
*rule
,
6747 struct ethtool_tcpip6_spec
*spec
,
6748 struct ethtool_tcpip6_spec
*spec_mask
)
6750 ipv6_addr_cpu_to_be32(spec
->ip6src
, rule
->tuples
.src_ip
);
6751 ipv6_addr_cpu_to_be32(spec
->ip6dst
, rule
->tuples
.dst_ip
);
6752 if (rule
->unused_tuple
& BIT(INNER_SRC_IP
))
6753 memset(spec_mask
->ip6src
, 0, sizeof(spec_mask
->ip6src
));
6755 ipv6_addr_cpu_to_be32(spec_mask
->ip6src
,
6756 rule
->tuples_mask
.src_ip
);
6758 if (rule
->unused_tuple
& BIT(INNER_DST_IP
))
6759 memset(spec_mask
->ip6dst
, 0, sizeof(spec_mask
->ip6dst
));
6761 ipv6_addr_cpu_to_be32(spec_mask
->ip6dst
,
6762 rule
->tuples_mask
.dst_ip
);
6764 spec
->tclass
= rule
->tuples
.ip_tos
;
6765 spec_mask
->tclass
= rule
->unused_tuple
& BIT(INNER_IP_TOS
) ?
6766 0 : rule
->tuples_mask
.ip_tos
;
6768 spec
->psrc
= cpu_to_be16(rule
->tuples
.src_port
);
6769 spec_mask
->psrc
= rule
->unused_tuple
& BIT(INNER_SRC_PORT
) ?
6770 0 : cpu_to_be16(rule
->tuples_mask
.src_port
);
6772 spec
->pdst
= cpu_to_be16(rule
->tuples
.dst_port
);
6773 spec_mask
->pdst
= rule
->unused_tuple
& BIT(INNER_DST_PORT
) ?
6774 0 : cpu_to_be16(rule
->tuples_mask
.dst_port
);
6777 static void hclge_fd_get_ip6_info(struct hclge_fd_rule
*rule
,
6778 struct ethtool_usrip6_spec
*spec
,
6779 struct ethtool_usrip6_spec
*spec_mask
)
6781 ipv6_addr_cpu_to_be32(spec
->ip6src
, rule
->tuples
.src_ip
);
6782 ipv6_addr_cpu_to_be32(spec
->ip6dst
, rule
->tuples
.dst_ip
);
6783 if (rule
->unused_tuple
& BIT(INNER_SRC_IP
))
6784 memset(spec_mask
->ip6src
, 0, sizeof(spec_mask
->ip6src
));
6786 ipv6_addr_cpu_to_be32(spec_mask
->ip6src
,
6787 rule
->tuples_mask
.src_ip
);
6789 if (rule
->unused_tuple
& BIT(INNER_DST_IP
))
6790 memset(spec_mask
->ip6dst
, 0, sizeof(spec_mask
->ip6dst
));
6792 ipv6_addr_cpu_to_be32(spec_mask
->ip6dst
,
6793 rule
->tuples_mask
.dst_ip
);
6795 spec
->tclass
= rule
->tuples
.ip_tos
;
6796 spec_mask
->tclass
= rule
->unused_tuple
& BIT(INNER_IP_TOS
) ?
6797 0 : rule
->tuples_mask
.ip_tos
;
6799 spec
->l4_proto
= rule
->tuples
.ip_proto
;
6800 spec_mask
->l4_proto
= rule
->unused_tuple
& BIT(INNER_IP_PROTO
) ?
6801 0 : rule
->tuples_mask
.ip_proto
;
static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
				    struct ethhdr *spec,
				    struct ethhdr *spec_mask)
{
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);

	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}
6826 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec
*fs
,
6827 struct hclge_fd_rule
*rule
)
6829 if ((rule
->unused_tuple
& HCLGE_FD_TUPLE_USER_DEF_TUPLES
) ==
6830 HCLGE_FD_TUPLE_USER_DEF_TUPLES
) {
6831 fs
->h_ext
.data
[0] = 0;
6832 fs
->h_ext
.data
[1] = 0;
6833 fs
->m_ext
.data
[0] = 0;
6834 fs
->m_ext
.data
[1] = 0;
6836 fs
->h_ext
.data
[0] = cpu_to_be32(rule
->ep
.user_def
.offset
);
6837 fs
->h_ext
.data
[1] = cpu_to_be32(rule
->ep
.user_def
.data
);
6839 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK
);
6840 fs
->m_ext
.data
[1] = cpu_to_be32(rule
->ep
.user_def
.data_mask
);
6844 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec
*fs
,
6845 struct hclge_fd_rule
*rule
)
6847 if (fs
->flow_type
& FLOW_EXT
) {
6848 fs
->h_ext
.vlan_tci
= cpu_to_be16(rule
->tuples
.vlan_tag1
);
6849 fs
->m_ext
.vlan_tci
=
6850 rule
->unused_tuple
& BIT(INNER_VLAN_TAG_FST
) ?
6851 0 : cpu_to_be16(rule
->tuples_mask
.vlan_tag1
);
6853 hclge_fd_get_user_def_info(fs
, rule
);
6856 if (fs
->flow_type
& FLOW_MAC_EXT
) {
6857 ether_addr_copy(fs
->h_ext
.h_dest
, rule
->tuples
.dst_mac
);
6858 if (rule
->unused_tuple
& BIT(INNER_DST_MAC
))
6859 eth_zero_addr(fs
->m_u
.ether_spec
.h_dest
);
6861 ether_addr_copy(fs
->m_u
.ether_spec
.h_dest
,
6862 rule
->tuples_mask
.dst_mac
);
static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
					       u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
	}

	return NULL;
}
static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}
}
6897 static int hclge_get_fd_rule_info(struct hnae3_handle
*handle
,
6898 struct ethtool_rxnfc
*cmd
)
6900 struct hclge_vport
*vport
= hclge_get_vport(handle
);
6901 struct hclge_fd_rule
*rule
= NULL
;
6902 struct hclge_dev
*hdev
= vport
->back
;
6903 struct ethtool_rx_flow_spec
*fs
;
6905 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
6908 fs
= (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
6910 spin_lock_bh(&hdev
->fd_rule_lock
);
6912 rule
= hclge_get_fd_rule(hdev
, fs
->location
);
6914 spin_unlock_bh(&hdev
->fd_rule_lock
);
6918 fs
->flow_type
= rule
->flow_type
;
6919 switch (fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
)) {
6923 hclge_fd_get_tcpip4_info(rule
, &fs
->h_u
.tcp_ip4_spec
,
6924 &fs
->m_u
.tcp_ip4_spec
);
6927 hclge_fd_get_ip4_info(rule
, &fs
->h_u
.usr_ip4_spec
,
6928 &fs
->m_u
.usr_ip4_spec
);
6933 hclge_fd_get_tcpip6_info(rule
, &fs
->h_u
.tcp_ip6_spec
,
6934 &fs
->m_u
.tcp_ip6_spec
);
6936 case IPV6_USER_FLOW
:
6937 hclge_fd_get_ip6_info(rule
, &fs
->h_u
.usr_ip6_spec
,
6938 &fs
->m_u
.usr_ip6_spec
);
6940 /* The flow type of fd rule has been checked before adding in to rule
6941 * list. As other flow types have been handled, it must be ETHER_FLOW
6942 * for the default case
6945 hclge_fd_get_ether_info(rule
, &fs
->h_u
.ether_spec
,
6946 &fs
->m_u
.ether_spec
);
6950 hclge_fd_get_ext_info(fs
, rule
);
6952 hclge_fd_get_ring_cookie(fs
, rule
);
6954 spin_unlock_bh(&hdev
->fd_rule_lock
);
6959 static int hclge_get_all_rules(struct hnae3_handle
*handle
,
6960 struct ethtool_rxnfc
*cmd
, u32
*rule_locs
)
6962 struct hclge_vport
*vport
= hclge_get_vport(handle
);
6963 struct hclge_dev
*hdev
= vport
->back
;
6964 struct hclge_fd_rule
*rule
;
6965 struct hlist_node
*node2
;
6968 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
6971 cmd
->data
= hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
];
6973 spin_lock_bh(&hdev
->fd_rule_lock
);
6974 hlist_for_each_entry_safe(rule
, node2
,
6975 &hdev
->fd_rule_list
, rule_node
) {
6976 if (cnt
== cmd
->rule_cnt
) {
6977 spin_unlock_bh(&hdev
->fd_rule_lock
);
6981 if (rule
->state
== HCLGE_FD_TO_DEL
)
6984 rule_locs
[cnt
] = rule
->location
;
6988 spin_unlock_bh(&hdev
->fd_rule_lock
);
6990 cmd
->rule_cnt
= cnt
;
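/* aRFS support: extract the tuples of a dissected flow so that it can be
 * matched against, or added to, the flow director table.
 */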
6995 static void hclge_fd_get_flow_tuples(const struct flow_keys
*fkeys
,
6996 struct hclge_fd_rule_tuples
*tuples
)
6998 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6999 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7001 tuples
->ether_proto
= be16_to_cpu(fkeys
->basic
.n_proto
);
7002 tuples
->ip_proto
= fkeys
->basic
.ip_proto
;
7003 tuples
->dst_port
= be16_to_cpu(fkeys
->ports
.dst
);
7005 if (fkeys
->basic
.n_proto
== htons(ETH_P_IP
)) {
7006 tuples
->src_ip
[3] = be32_to_cpu(fkeys
->addrs
.v4addrs
.src
);
7007 tuples
->dst_ip
[3] = be32_to_cpu(fkeys
->addrs
.v4addrs
.dst
);
7011 for (i
= 0; i
< IPV6_ADDR_WORDS
; i
++) {
7012 tuples
->src_ip
[i
] = be32_to_cpu(flow_ip6_src
[i
]);
7013 tuples
->dst_ip
[i
] = be32_to_cpu(flow_ip6_dst
[i
]);
/* traverse all rules, check whether an existed rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}
7034 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples
*tuples
,
7035 struct hclge_fd_rule
*rule
)
7037 rule
->unused_tuple
= BIT(INNER_SRC_MAC
) | BIT(INNER_DST_MAC
) |
7038 BIT(INNER_VLAN_TAG_FST
) | BIT(INNER_IP_TOS
) |
7039 BIT(INNER_SRC_PORT
);
7042 rule
->rule_type
= HCLGE_FD_ARFS_ACTIVE
;
7043 rule
->state
= HCLGE_FD_TO_ADD
;
7044 if (tuples
->ether_proto
== ETH_P_IP
) {
7045 if (tuples
->ip_proto
== IPPROTO_TCP
)
7046 rule
->flow_type
= TCP_V4_FLOW
;
7048 rule
->flow_type
= UDP_V4_FLOW
;
7050 if (tuples
->ip_proto
== IPPROTO_TCP
)
7051 rule
->flow_type
= TCP_V6_FLOW
;
7053 rule
->flow_type
= UDP_V6_FLOW
;
7055 memcpy(&rule
->tuples
, tuples
, sizeof(rule
->tuples
));
7056 memset(&rule
->tuples_mask
, 0xFF, sizeof(rule
->tuples_mask
));
7059 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle
*handle
, u16 queue_id
,
7060 u16 flow_id
, struct flow_keys
*fkeys
)
7062 struct hclge_vport
*vport
= hclge_get_vport(handle
);
7063 struct hclge_fd_rule_tuples new_tuples
= {};
7064 struct hclge_dev
*hdev
= vport
->back
;
7065 struct hclge_fd_rule
*rule
;
7068 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
7071 /* when there is already fd rule existed add by user,
7072 * arfs should not work
7074 spin_lock_bh(&hdev
->fd_rule_lock
);
7075 if (hdev
->fd_active_type
!= HCLGE_FD_ARFS_ACTIVE
&&
7076 hdev
->fd_active_type
!= HCLGE_FD_RULE_NONE
) {
7077 spin_unlock_bh(&hdev
->fd_rule_lock
);
7081 hclge_fd_get_flow_tuples(fkeys
, &new_tuples
);
7083 /* check is there flow director filter existed for this flow,
7084 * if not, create a new filter for it;
7085 * if filter exist with different queue id, modify the filter;
7086 * if filter exist with same queue id, do nothing
7088 rule
= hclge_fd_search_flow_keys(hdev
, &new_tuples
);
7090 bit_id
= find_first_zero_bit(hdev
->fd_bmap
, MAX_FD_FILTER_NUM
);
7091 if (bit_id
>= hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
]) {
7092 spin_unlock_bh(&hdev
->fd_rule_lock
);
7096 rule
= kzalloc(sizeof(*rule
), GFP_ATOMIC
);
7098 spin_unlock_bh(&hdev
->fd_rule_lock
);
7102 rule
->location
= bit_id
;
7103 rule
->arfs
.flow_id
= flow_id
;
7104 rule
->queue_id
= queue_id
;
7105 hclge_fd_build_arfs_rule(&new_tuples
, rule
);
7106 hclge_update_fd_list(hdev
, rule
->state
, rule
->location
, rule
);
7107 hdev
->fd_active_type
= HCLGE_FD_ARFS_ACTIVE
;
7108 } else if (rule
->queue_id
!= queue_id
) {
7109 rule
->queue_id
= queue_id
;
7110 rule
->state
= HCLGE_FD_TO_ADD
;
7111 set_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
);
7112 hclge_task_schedule(hdev
, 0);
7114 spin_unlock_bh(&hdev
->fd_rule_lock
);
7115 return rule
->location
;
7118 static void hclge_rfs_filter_expire(struct hclge_dev
*hdev
)
7120 #ifdef CONFIG_RFS_ACCEL
7121 struct hnae3_handle
*handle
= &hdev
->vport
[0].nic
;
7122 struct hclge_fd_rule
*rule
;
7123 struct hlist_node
*node
;
7125 spin_lock_bh(&hdev
->fd_rule_lock
);
7126 if (hdev
->fd_active_type
!= HCLGE_FD_ARFS_ACTIVE
) {
7127 spin_unlock_bh(&hdev
->fd_rule_lock
);
7130 hlist_for_each_entry_safe(rule
, node
, &hdev
->fd_rule_list
, rule_node
) {
7131 if (rule
->state
!= HCLGE_FD_ACTIVE
)
7133 if (rps_may_expire_flow(handle
->netdev
, rule
->queue_id
,
7134 rule
->arfs
.flow_id
, rule
->location
)) {
7135 rule
->state
= HCLGE_FD_TO_DEL
;
7136 set_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
);
7139 spin_unlock_bh(&hdev
->fd_rule_lock
);
/* must be called with fd_rule_lock held */
7144 static int hclge_clear_arfs_rules(struct hclge_dev
*hdev
)
7146 #ifdef CONFIG_RFS_ACCEL
7147 struct hclge_fd_rule
*rule
;
7148 struct hlist_node
*node
;
7151 if (hdev
->fd_active_type
!= HCLGE_FD_ARFS_ACTIVE
)
7154 hlist_for_each_entry_safe(rule
, node
, &hdev
->fd_rule_list
, rule_node
) {
7155 switch (rule
->state
) {
7156 case HCLGE_FD_TO_DEL
:
7157 case HCLGE_FD_ACTIVE
:
7158 ret
= hclge_fd_tcam_config(hdev
, HCLGE_FD_STAGE_1
, true,
7159 rule
->location
, NULL
, false);
7163 case HCLGE_FD_TO_ADD
:
7164 hclge_fd_dec_rule_cnt(hdev
, rule
->location
);
7165 hlist_del(&rule
->rule_node
);
7172 hclge_sync_fd_state(hdev
);
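/* The hclge_get_cls_key_xxx() helpers below translate tc flower match keys
 * into flow director rule tuples; keys that are not present in the match
 * are marked as unused tuples.
 */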
7178 static void hclge_get_cls_key_basic(const struct flow_rule
*flow
,
7179 struct hclge_fd_rule
*rule
)
7181 if (flow_rule_match_key(flow
, FLOW_DISSECTOR_KEY_BASIC
)) {
7182 struct flow_match_basic match
;
7183 u16 ethtype_key
, ethtype_mask
;
7185 flow_rule_match_basic(flow
, &match
);
7186 ethtype_key
= ntohs(match
.key
->n_proto
);
7187 ethtype_mask
= ntohs(match
.mask
->n_proto
);
7189 if (ethtype_key
== ETH_P_ALL
) {
7193 rule
->tuples
.ether_proto
= ethtype_key
;
7194 rule
->tuples_mask
.ether_proto
= ethtype_mask
;
7195 rule
->tuples
.ip_proto
= match
.key
->ip_proto
;
7196 rule
->tuples_mask
.ip_proto
= match
.mask
->ip_proto
;
7198 rule
->unused_tuple
|= BIT(INNER_IP_PROTO
);
7199 rule
->unused_tuple
|= BIT(INNER_ETH_TYPE
);
static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}
static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}
7236 static int hclge_get_cls_key_ip(const struct flow_rule
*flow
,
7237 struct hclge_fd_rule
*rule
,
7238 struct netlink_ext_ack
*extack
)
7242 if (flow_rule_match_key(flow
, FLOW_DISSECTOR_KEY_CONTROL
)) {
7243 struct flow_match_control match
;
7245 flow_rule_match_control(flow
, &match
);
7246 addr_type
= match
.key
->addr_type
;
7248 if (flow_rule_has_control_flags(match
.mask
->flags
, extack
))
7252 if (addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
) {
7253 struct flow_match_ipv4_addrs match
;
7255 flow_rule_match_ipv4_addrs(flow
, &match
);
7256 rule
->tuples
.src_ip
[IPV4_INDEX
] = be32_to_cpu(match
.key
->src
);
7257 rule
->tuples_mask
.src_ip
[IPV4_INDEX
] =
7258 be32_to_cpu(match
.mask
->src
);
7259 rule
->tuples
.dst_ip
[IPV4_INDEX
] = be32_to_cpu(match
.key
->dst
);
7260 rule
->tuples_mask
.dst_ip
[IPV4_INDEX
] =
7261 be32_to_cpu(match
.mask
->dst
);
7262 } else if (addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
) {
7263 struct flow_match_ipv6_addrs match
;
7265 flow_rule_match_ipv6_addrs(flow
, &match
);
7266 ipv6_addr_be32_to_cpu(rule
->tuples
.src_ip
,
7267 match
.key
->src
.s6_addr32
);
7268 ipv6_addr_be32_to_cpu(rule
->tuples_mask
.src_ip
,
7269 match
.mask
->src
.s6_addr32
);
7270 ipv6_addr_be32_to_cpu(rule
->tuples
.dst_ip
,
7271 match
.key
->dst
.s6_addr32
);
7272 ipv6_addr_be32_to_cpu(rule
->tuples_mask
.dst_ip
,
7273 match
.mask
->dst
.s6_addr32
);
7275 rule
->unused_tuple
|= BIT(INNER_SRC_IP
);
7276 rule
->unused_tuple
|= BIT(INNER_DST_IP
);
static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}
7300 static int hclge_parse_cls_flower(struct hclge_dev
*hdev
,
7301 struct flow_cls_offload
*cls_flower
,
7302 struct hclge_fd_rule
*rule
)
7304 struct flow_rule
*flow
= flow_cls_offload_flow_rule(cls_flower
);
7305 struct netlink_ext_ack
*extack
= cls_flower
->common
.extack
;
7306 struct flow_dissector
*dissector
= flow
->match
.dissector
;
7309 if (dissector
->used_keys
&
7310 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL
) |
7311 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC
) |
7312 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS
) |
7313 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN
) |
7314 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS
) |
7315 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS
) |
7316 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS
))) {
7317 dev_err(&hdev
->pdev
->dev
, "unsupported key set: %#llx\n",
7318 dissector
->used_keys
);
7322 hclge_get_cls_key_basic(flow
, rule
);
7323 hclge_get_cls_key_mac(flow
, rule
);
7324 hclge_get_cls_key_vlan(flow
, rule
);
7326 ret
= hclge_get_cls_key_ip(flow
, rule
, extack
);
7330 hclge_get_cls_key_port(flow
, rule
);
7335 static int hclge_check_cls_flower(struct hclge_dev
*hdev
,
7336 struct flow_cls_offload
*cls_flower
, int tc
)
7338 u32 prio
= cls_flower
->common
.prio
;
7340 if (tc
< 0 || tc
> hdev
->tc_max
) {
7341 dev_err(&hdev
->pdev
->dev
, "invalid traffic class\n");
7346 prio
> hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
]) {
7347 dev_err(&hdev
->pdev
->dev
,
7348 "prio %u should be in range[1, %u]\n",
7349 prio
, hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
]);
7353 if (test_bit(prio
- 1, hdev
->fd_bmap
)) {
7354 dev_err(&hdev
->pdev
->dev
, "prio %u is already used\n", prio
);
7360 static int hclge_add_cls_flower(struct hnae3_handle
*handle
,
7361 struct flow_cls_offload
*cls_flower
,
7364 struct hclge_vport
*vport
= hclge_get_vport(handle
);
7365 struct hclge_dev
*hdev
= vport
->back
;
7366 struct hclge_fd_rule
*rule
;
7369 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
)) {
7370 dev_err(&hdev
->pdev
->dev
,
7371 "cls flower is not supported\n");
7375 ret
= hclge_check_cls_flower(hdev
, cls_flower
, tc
);
7377 dev_err(&hdev
->pdev
->dev
,
7378 "failed to check cls flower params, ret = %d\n", ret
);
7382 rule
= kzalloc(sizeof(*rule
), GFP_KERNEL
);
7386 ret
= hclge_parse_cls_flower(hdev
, cls_flower
, rule
);
7392 rule
->action
= HCLGE_FD_ACTION_SELECT_TC
;
7393 rule
->cls_flower
.tc
= tc
;
7394 rule
->location
= cls_flower
->common
.prio
- 1;
7396 rule
->cls_flower
.cookie
= cls_flower
->cookie
;
7397 rule
->rule_type
= HCLGE_FD_TC_FLOWER_ACTIVE
;
7399 ret
= hclge_add_fd_entry_common(hdev
, rule
);
7406 static struct hclge_fd_rule
*hclge_find_cls_flower(struct hclge_dev
*hdev
,
7407 unsigned long cookie
)
7409 struct hclge_fd_rule
*rule
;
7410 struct hlist_node
*node
;
7412 hlist_for_each_entry_safe(rule
, node
, &hdev
->fd_rule_list
, rule_node
) {
7413 if (rule
->cls_flower
.cookie
== cookie
)
7420 static int hclge_del_cls_flower(struct hnae3_handle
*handle
,
7421 struct flow_cls_offload
*cls_flower
)
7423 struct hclge_vport
*vport
= hclge_get_vport(handle
);
7424 struct hclge_dev
*hdev
= vport
->back
;
7425 struct hclge_fd_rule
*rule
;
7428 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
7431 spin_lock_bh(&hdev
->fd_rule_lock
);
7433 rule
= hclge_find_cls_flower(hdev
, cls_flower
->cookie
);
7435 spin_unlock_bh(&hdev
->fd_rule_lock
);
7439 ret
= hclge_fd_tcam_config(hdev
, HCLGE_FD_STAGE_1
, true, rule
->location
,
7442 /* if tcam config fail, set rule state to TO_DEL,
7443 * so the rule will be deleted when periodic
7444 * task being scheduled.
7446 hclge_update_fd_list(hdev
, HCLGE_FD_TO_DEL
, rule
->location
, NULL
);
7447 set_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
);
7448 spin_unlock_bh(&hdev
->fd_rule_lock
);
7452 hclge_update_fd_list(hdev
, HCLGE_FD_DELETED
, rule
->location
, NULL
);
7453 spin_unlock_bh(&hdev
->fd_rule_lock
);
7458 static void hclge_sync_fd_list(struct hclge_dev
*hdev
, struct hlist_head
*hlist
)
7460 struct hclge_fd_rule
*rule
;
7461 struct hlist_node
*node
;
7464 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
))
7467 spin_lock_bh(&hdev
->fd_rule_lock
);
7469 hlist_for_each_entry_safe(rule
, node
, hlist
, rule_node
) {
7470 switch (rule
->state
) {
7471 case HCLGE_FD_TO_ADD
:
7472 ret
= hclge_fd_config_rule(hdev
, rule
);
7475 rule
->state
= HCLGE_FD_ACTIVE
;
7477 case HCLGE_FD_TO_DEL
:
7478 ret
= hclge_fd_tcam_config(hdev
, HCLGE_FD_STAGE_1
, true,
7479 rule
->location
, NULL
, false);
7482 hclge_fd_dec_rule_cnt(hdev
, rule
->location
);
7483 hclge_fd_free_node(hdev
, rule
);
7492 set_bit(HCLGE_STATE_FD_TBL_CHANGED
, &hdev
->state
);
7494 spin_unlock_bh(&hdev
->fd_rule_lock
);
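/* Called from the periodic service task: flush pending clear-all requests,
 * re-sync the user-def field configuration and push any rules left in the
 * TO_ADD/TO_DEL state to hardware.
 */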
7497 static void hclge_sync_fd_table(struct hclge_dev
*hdev
)
7499 if (!hnae3_ae_dev_fd_supported(hdev
->ae_dev
))
7502 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL
, &hdev
->state
)) {
7503 bool clear_list
= hdev
->fd_active_type
== HCLGE_FD_ARFS_ACTIVE
;
7505 hclge_clear_fd_rules_in_list(hdev
, clear_list
);
7508 hclge_sync_fd_user_def_cfg(hdev
, false);
7510 hclge_sync_fd_list(hdev
, &hdev
->fd_rule_list
);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}
7546 static void hclge_enable_fd(struct hnae3_handle
*handle
, bool enable
)
7548 struct hclge_vport
*vport
= hclge_get_vport(handle
);
7549 struct hclge_dev
*hdev
= vport
->back
;
7551 hdev
->fd_en
= enable
;
7554 set_bit(HCLGE_STATE_FD_CLEAR_ALL
, &hdev
->state
);
7556 hclge_restore_fd_entries(handle
);
7558 hclge_task_schedule(hdev
, 0);
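/* Enable or disable the MAC as a whole: TX/RX, padding, FCS insertion and
 * stripping are switched together, and on disable the function waits for
 * the MAC link to report down before returning.
 */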
7561 static void hclge_cfg_mac_mode(struct hclge_dev
*hdev
, bool enable
)
7563 #define HCLGE_LINK_STATUS_WAIT_CNT 3
7565 struct hclge_desc desc
;
7566 struct hclge_config_mac_mode_cmd
*req
=
7567 (struct hclge_config_mac_mode_cmd
*)desc
.data
;
7571 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_MAC_MODE
, false);
7574 hnae3_set_bit(loop_en
, HCLGE_MAC_TX_EN_B
, 1U);
7575 hnae3_set_bit(loop_en
, HCLGE_MAC_RX_EN_B
, 1U);
7576 hnae3_set_bit(loop_en
, HCLGE_MAC_PAD_TX_B
, 1U);
7577 hnae3_set_bit(loop_en
, HCLGE_MAC_PAD_RX_B
, 1U);
7578 hnae3_set_bit(loop_en
, HCLGE_MAC_FCS_TX_B
, 1U);
7579 hnae3_set_bit(loop_en
, HCLGE_MAC_RX_FCS_B
, 1U);
7580 hnae3_set_bit(loop_en
, HCLGE_MAC_RX_FCS_STRIP_B
, 1U);
7581 hnae3_set_bit(loop_en
, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B
, 1U);
7582 hnae3_set_bit(loop_en
, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B
, 1U);
7583 hnae3_set_bit(loop_en
, HCLGE_MAC_TX_UNDER_MIN_ERR_B
, 1U);
7586 req
->txrx_pad_fcs_loop_en
= cpu_to_le32(loop_en
);
7588 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7590 dev_err(&hdev
->pdev
->dev
,
7591 "mac enable fail, ret =%d.\n", ret
);
7596 hclge_mac_link_status_wait(hdev
, HCLGE_LINK_STATUS_DOWN
,
7597 HCLGE_LINK_STATUS_WAIT_CNT
);
7600 static int hclge_config_switch_param(struct hclge_dev
*hdev
, int vfid
,
7601 u8 switch_param
, u8 param_mask
)
7603 struct hclge_mac_vlan_switch_cmd
*req
;
7604 struct hclge_desc desc
;
7608 func_id
= hclge_get_port_number(HOST_PORT
, 0, vfid
, 0);
7609 req
= (struct hclge_mac_vlan_switch_cmd
*)desc
.data
;
7611 /* read current config parameter */
7612 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM
,
7614 req
->roce_sel
= HCLGE_MAC_VLAN_NIC_SEL
;
7615 req
->func_id
= cpu_to_le32(func_id
);
7617 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7619 dev_err(&hdev
->pdev
->dev
,
7620 "read mac vlan switch parameter fail, ret = %d\n", ret
);
7624 /* modify and write new config parameter */
7625 hclge_comm_cmd_reuse_desc(&desc
, false);
7626 req
->switch_param
= (req
->switch_param
& param_mask
) | switch_param
;
7627 req
->param_mask
= param_mask
;
7629 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7631 dev_err(&hdev
->pdev
->dev
,
7632 "set mac vlan switch parameter fail, ret = %d\n", ret
);
7636 static void hclge_phy_link_status_wait(struct hclge_dev
*hdev
,
7639 #define HCLGE_PHY_LINK_STATUS_NUM 200
7641 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
7646 ret
= phy_read_status(phydev
);
7648 dev_err(&hdev
->pdev
->dev
,
7649 "phy update link status fail, ret = %d\n", ret
);
7653 if (phydev
->link
== link_ret
)
7656 msleep(HCLGE_LINK_STATUS_MS
);
7657 } while (++i
< HCLGE_PHY_LINK_STATUS_NUM
);
7660 static int hclge_mac_link_status_wait(struct hclge_dev
*hdev
, int link_ret
,
7668 ret
= hclge_get_mac_link_status(hdev
, &link_status
);
7671 if (link_status
== link_ret
)
7674 msleep(HCLGE_LINK_STATUS_MS
);
7675 } while (++i
< wait_cnt
);
7679 static int hclge_mac_phy_link_status_wait(struct hclge_dev
*hdev
, bool en
,
7682 #define HCLGE_MAC_LINK_STATUS_NUM 100
7686 link_ret
= en
? HCLGE_LINK_STATUS_UP
: HCLGE_LINK_STATUS_DOWN
;
7689 hclge_phy_link_status_wait(hdev
, link_ret
);
7691 return hclge_mac_link_status_wait(hdev
, link_ret
,
7692 HCLGE_MAC_LINK_STATUS_NUM
);
7695 static int hclge_set_app_loopback(struct hclge_dev
*hdev
, bool en
)
7697 struct hclge_config_mac_mode_cmd
*req
;
7698 struct hclge_desc desc
;
7702 req
= (struct hclge_config_mac_mode_cmd
*)&desc
.data
[0];
7703 /* 1 Read out the MAC mode config at first */
7704 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_MAC_MODE
, true);
7705 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7707 dev_err(&hdev
->pdev
->dev
,
7708 "mac loopback get fail, ret =%d.\n", ret
);
7712 /* 2 Then setup the loopback flag */
7713 loop_en
= le32_to_cpu(req
->txrx_pad_fcs_loop_en
);
7714 hnae3_set_bit(loop_en
, HCLGE_MAC_APP_LP_B
, en
? 1 : 0);
7716 req
->txrx_pad_fcs_loop_en
= cpu_to_le32(loop_en
);
7718 /* 3 Config mac work mode with loopback flag
7719 * and its original configure parameters
7721 hclge_comm_cmd_reuse_desc(&desc
, false);
7722 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7724 dev_err(&hdev
->pdev
->dev
,
7725 "mac loopback set fail, ret =%d.\n", ret
);
7729 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev
*hdev
, bool en
,
7730 enum hnae3_loop loop_mode
)
7732 struct hclge_common_lb_cmd
*req
;
7733 struct hclge_desc desc
;
7737 req
= (struct hclge_common_lb_cmd
*)desc
.data
;
7738 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_COMMON_LOOPBACK
, false);
7740 switch (loop_mode
) {
7741 case HNAE3_LOOP_SERIAL_SERDES
:
7742 loop_mode_b
= HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B
;
7744 case HNAE3_LOOP_PARALLEL_SERDES
:
7745 loop_mode_b
= HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B
;
7747 case HNAE3_LOOP_PHY
:
7748 loop_mode_b
= HCLGE_CMD_GE_PHY_INNER_LOOP_B
;
7751 dev_err(&hdev
->pdev
->dev
,
7752 "unsupported loopback mode %d\n", loop_mode
);
7756 req
->mask
= loop_mode_b
;
7758 req
->enable
= loop_mode_b
;
7760 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7762 dev_err(&hdev
->pdev
->dev
,
7763 "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7769 static int hclge_cfg_common_loopback_wait(struct hclge_dev
*hdev
)
7771 #define HCLGE_COMMON_LB_RETRY_MS 10
7772 #define HCLGE_COMMON_LB_RETRY_NUM 100
7774 struct hclge_common_lb_cmd
*req
;
7775 struct hclge_desc desc
;
7779 req
= (struct hclge_common_lb_cmd
*)desc
.data
;
7782 msleep(HCLGE_COMMON_LB_RETRY_MS
);
7783 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_COMMON_LOOPBACK
,
7785 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
7787 dev_err(&hdev
->pdev
->dev
,
7788 "failed to get loopback done status, ret = %d\n",
7792 } while (++i
< HCLGE_COMMON_LB_RETRY_NUM
&&
7793 !(req
->result
& HCLGE_CMD_COMMON_LB_DONE_B
));
7795 if (!(req
->result
& HCLGE_CMD_COMMON_LB_DONE_B
)) {
7796 dev_err(&hdev
->pdev
->dev
, "wait loopback timeout\n");
7798 } else if (!(req
->result
& HCLGE_CMD_COMMON_LB_SUCCESS_B
)) {
7799 dev_err(&hdev
->pdev
->dev
, "failed to do loopback test\n");
static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback_wait(hdev);
}

static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}
static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}
7867 static int hclge_set_phy_loopback(struct hclge_dev
*hdev
, bool en
)
7869 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
7873 if (hnae3_dev_phy_imp_supported(hdev
))
7874 return hclge_set_common_loopback(hdev
, en
,
7880 ret
= hclge_enable_phy_loopback(hdev
, phydev
);
7882 ret
= hclge_disable_phy_loopback(hdev
, phydev
);
7884 dev_err(&hdev
->pdev
->dev
,
7885 "set phy loopback fail, ret = %d\n", ret
);
7889 hclge_cfg_mac_mode(hdev
, en
);
7891 ret
= hclge_mac_phy_link_status_wait(hdev
, en
, true);
7893 dev_err(&hdev
->pdev
->dev
,
7894 "phy loopback config mac mode timeout\n");
7899 static int hclge_tqp_enable_cmd_send(struct hclge_dev
*hdev
, u16 tqp_id
,
7900 u16 stream_id
, bool enable
)
7902 struct hclge_desc desc
;
7903 struct hclge_cfg_com_tqp_queue_cmd
*req
=
7904 (struct hclge_cfg_com_tqp_queue_cmd
*)desc
.data
;
7906 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CFG_COM_TQP_QUEUE
, false);
7907 req
->tqp_id
= cpu_to_le16(tqp_id
);
7908 req
->stream_id
= cpu_to_le16(stream_id
);
7910 req
->enable
|= 1U << HCLGE_TQP_ENABLE_B
;
7912 return hclge_cmd_send(&hdev
->hw
, &desc
, 1);
static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}
7930 static int hclge_set_loopback(struct hnae3_handle
*handle
,
7931 enum hnae3_loop loop_mode
, bool en
)
7933 struct hclge_vport
*vport
= hclge_get_vport(handle
);
7934 struct hclge_dev
*hdev
= vport
->back
;
7937 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7938 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7939 * the same, the packets are looped back in the SSU. If SSU loopback
7940 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7942 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
7943 u8 switch_param
= en
? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B
);
7945 ret
= hclge_config_switch_param(hdev
, PF_VPORT_ID
, switch_param
,
7946 HCLGE_SWITCH_ALW_LPBK_MASK
);
7951 switch (loop_mode
) {
7952 case HNAE3_LOOP_APP
:
7953 ret
= hclge_set_app_loopback(hdev
, en
);
7955 case HNAE3_LOOP_SERIAL_SERDES
:
7956 case HNAE3_LOOP_PARALLEL_SERDES
:
7957 ret
= hclge_set_common_loopback(hdev
, en
, loop_mode
);
7959 case HNAE3_LOOP_PHY
:
7960 ret
= hclge_set_phy_loopback(hdev
, en
);
7962 case HNAE3_LOOP_EXTERNAL
:
7966 dev_err(&hdev
->pdev
->dev
,
7967 "loop_mode %d is not supported\n", loop_mode
);
7974 ret
= hclge_tqp_enable(handle
, en
);
7976 dev_err(&hdev
->pdev
->dev
, "failed to %s tqp in loopback, ret = %d\n",
7977 en
? "enable" : "disable", ret
);
7982 static int hclge_set_default_loopback(struct hclge_dev
*hdev
)
7986 ret
= hclge_set_app_loopback(hdev
, false);
7990 ret
= hclge_cfg_common_loopback(hdev
, false, HNAE3_LOOP_SERIAL_SERDES
);
7994 return hclge_cfg_common_loopback(hdev
, false,
7995 HNAE3_LOOP_PARALLEL_SERDES
);
7998 static void hclge_flush_link_update(struct hclge_dev
*hdev
)
8000 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8002 unsigned long last
= hdev
->serv_processed_cnt
;
8005 while (test_bit(HCLGE_STATE_LINK_UPDATING
, &hdev
->state
) &&
8006 i
++ < HCLGE_FLUSH_LINK_TIMEOUT
&&
8007 last
== hdev
->serv_processed_cnt
)
8011 static void hclge_set_timer_task(struct hnae3_handle
*handle
, bool enable
)
8013 struct hclge_vport
*vport
= hclge_get_vport(handle
);
8014 struct hclge_dev
*hdev
= vport
->back
;
8017 hclge_task_schedule(hdev
, 0);
8019 /* Set the DOWN flag here to disable link updating */
8020 set_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
8022 smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
8023 hclge_flush_link_update(hdev
);
8027 static int hclge_ae_start(struct hnae3_handle
*handle
)
8029 struct hclge_vport
*vport
= hclge_get_vport(handle
);
8030 struct hclge_dev
*hdev
= vport
->back
;
8033 hclge_cfg_mac_mode(hdev
, true);
8034 clear_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
8035 hdev
->hw
.mac
.link
= 0;
8037 /* reset tqp stats */
8038 hclge_comm_reset_tqp_stats(handle
);
8040 hclge_mac_start_phy(hdev
);
8045 static void hclge_ae_stop(struct hnae3_handle
*handle
)
8047 struct hclge_vport
*vport
= hclge_get_vport(handle
);
8048 struct hclge_dev
*hdev
= vport
->back
;
8050 set_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
8051 spin_lock_bh(&hdev
->fd_rule_lock
);
8052 hclge_clear_arfs_rules(hdev
);
8053 spin_unlock_bh(&hdev
->fd_rule_lock
);
8055 /* If it is not PF reset or FLR, the firmware will disable the MAC,
8056 * so it only need to stop phy here.
8058 if (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
)) {
8059 hclge_pfc_pause_en_cfg(hdev
, HCLGE_PFC_TX_RX_DISABLE
,
8061 if (hdev
->reset_type
!= HNAE3_FUNC_RESET
&&
8062 hdev
->reset_type
!= HNAE3_FLR_RESET
) {
8063 hclge_mac_stop_phy(hdev
);
8064 hclge_update_link_status(hdev
);
8069 hclge_reset_tqp(handle
);
8071 hclge_config_mac_tnl_int(hdev
, false);
8074 hclge_cfg_mac_mode(hdev
, false);
8076 hclge_mac_stop_phy(hdev
);
8078 /* reset tqp stats */
8079 hclge_comm_reset_tqp_stats(handle
);
8080 hclge_update_link_status(hdev
);
int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	vport->last_active_jiffies = jiffies;
	vport->need_notify = 0;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
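/* Translate the MAC/VLAN table command response code returned by the
 * firmware into a standard errno, depending on whether the operation was
 * an add, remove or lookup.
 */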
8128 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport
*vport
,
8129 u16 cmdq_resp
, u8 resp_code
,
8130 enum hclge_mac_vlan_tbl_opcode op
)
8132 struct hclge_dev
*hdev
= vport
->back
;
8135 dev_err(&hdev
->pdev
->dev
,
8136 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8141 if (op
== HCLGE_MAC_VLAN_ADD
) {
8142 if (!resp_code
|| resp_code
== 1)
8144 else if (resp_code
== HCLGE_ADD_UC_OVERFLOW
||
8145 resp_code
== HCLGE_ADD_MC_OVERFLOW
)
8148 dev_err(&hdev
->pdev
->dev
,
8149 "add mac addr failed for undefined, code=%u.\n",
8152 } else if (op
== HCLGE_MAC_VLAN_REMOVE
) {
8155 } else if (resp_code
== 1) {
8156 dev_dbg(&hdev
->pdev
->dev
,
8157 "remove mac addr failed for miss.\n");
8161 dev_err(&hdev
->pdev
->dev
,
8162 "remove mac addr failed for undefined, code=%u.\n",
8165 } else if (op
== HCLGE_MAC_VLAN_LKUP
) {
8168 } else if (resp_code
== 1) {
8169 dev_dbg(&hdev
->pdev
->dev
,
8170 "lookup mac addr failed for miss.\n");
8174 dev_err(&hdev
->pdev
->dev
,
8175 "lookup mac addr failed for undefined, code=%u.\n",
8180 dev_err(&hdev
->pdev
->dev
,
8181 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op
);
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
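/* Packing example (illustrative): for address 00:11:22:33:44:55 the code
 * above yields high_val = 0x33221100 (address bytes 3..0) and
 * low_val = 0x5544, which are then stored little-endian in the table
 * entry's mac_addr_hi32 and mac_addr_lo16 fields.
 */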
8247 static int hclge_remove_mac_vlan_tbl(struct hclge_vport
*vport
,
8248 struct hclge_mac_vlan_tbl_entry_cmd
*req
)
8250 struct hclge_dev
*hdev
= vport
->back
;
8251 struct hclge_desc desc
;
8256 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_REMOVE
, false);
8258 memcpy(desc
.data
, req
, sizeof(struct hclge_mac_vlan_tbl_entry_cmd
));
8260 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
8262 dev_err(&hdev
->pdev
->dev
,
8263 "del mac addr failed for cmd_send, ret =%d.\n",
8267 resp_code
= (le32_to_cpu(desc
.data
[0]) >> 8) & 0xff;
8268 retval
= le16_to_cpu(desc
.retval
);
8270 return hclge_get_mac_vlan_cmd_status(vport
, retval
, resp_code
,
8271 HCLGE_MAC_VLAN_REMOVE
);
8274 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport
*vport
,
8275 struct hclge_mac_vlan_tbl_entry_cmd
*req
,
8276 struct hclge_desc
*desc
,
8279 struct hclge_dev
*hdev
= vport
->back
;
8284 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_MAC_VLAN_ADD
, true);
8286 desc
[0].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
8287 memcpy(desc
[0].data
,
8289 sizeof(struct hclge_mac_vlan_tbl_entry_cmd
));
8290 hclge_cmd_setup_basic_desc(&desc
[1],
8291 HCLGE_OPC_MAC_VLAN_ADD
,
8293 desc
[1].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
8294 hclge_cmd_setup_basic_desc(&desc
[2],
8295 HCLGE_OPC_MAC_VLAN_ADD
,
8297 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 3);
8299 memcpy(desc
[0].data
,
8301 sizeof(struct hclge_mac_vlan_tbl_entry_cmd
));
8302 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 1);
8305 dev_err(&hdev
->pdev
->dev
,
8306 "lookup mac addr failed for cmd_send, ret =%d.\n",
8310 resp_code
= (le32_to_cpu(desc
[0].data
[0]) >> 8) & 0xff;
8311 retval
= le16_to_cpu(desc
[0].retval
);
8313 return hclge_get_mac_vlan_cmd_status(vport
, retval
, resp_code
,
8314 HCLGE_MAC_VLAN_LKUP
);
8317 static int hclge_add_mac_vlan_tbl(struct hclge_vport
*vport
,
8318 struct hclge_mac_vlan_tbl_entry_cmd
*req
,
8319 struct hclge_desc
*mc_desc
)
8321 struct hclge_dev
*hdev
= vport
->back
;
8328 struct hclge_desc desc
;
8330 hclge_cmd_setup_basic_desc(&desc
,
8331 HCLGE_OPC_MAC_VLAN_ADD
,
8333 memcpy(desc
.data
, req
,
8334 sizeof(struct hclge_mac_vlan_tbl_entry_cmd
));
8335 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
8336 resp_code
= (le32_to_cpu(desc
.data
[0]) >> 8) & 0xff;
8337 retval
= le16_to_cpu(desc
.retval
);
8339 cfg_status
= hclge_get_mac_vlan_cmd_status(vport
, retval
,
8341 HCLGE_MAC_VLAN_ADD
);
8343 hclge_comm_cmd_reuse_desc(&mc_desc
[0], false);
8344 mc_desc
[0].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
8345 hclge_comm_cmd_reuse_desc(&mc_desc
[1], false);
8346 mc_desc
[1].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
8347 hclge_comm_cmd_reuse_desc(&mc_desc
[2], false);
8348 mc_desc
[2].flag
&= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT
);
8349 memcpy(mc_desc
[0].data
, req
,
8350 sizeof(struct hclge_mac_vlan_tbl_entry_cmd
));
8351 ret
= hclge_cmd_send(&hdev
->hw
, mc_desc
, 3);
8352 resp_code
= (le32_to_cpu(mc_desc
[0].data
[0]) >> 8) & 0xff;
8353 retval
= le16_to_cpu(mc_desc
[0].retval
);
8355 cfg_status
= hclge_get_mac_vlan_cmd_status(vport
, retval
,
8357 HCLGE_MAC_VLAN_ADD
);
8361 dev_err(&hdev
->pdev
->dev
,
8362 "add mac addr failed for cmd_send, ret =%d.\n",
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	if (hdev->ae_dev->dev_specs.mc_mac_size)
		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);

	return 0;
}
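/* Illustrative numbers (not taken from the code): with max_umv_size = 256
 * and num_alloc_vport = 3, every vport gets priv_umv_size = 256 / 4 = 64
 * private unicast entries, and share_umv_size starts at 64 + 256 % 4 = 64.
 * The division remainder is folded into the shared pool, which a vport may
 * spill into once its private quota is exhausted.
 */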
8419 static void hclge_reset_umv_space(struct hclge_dev
*hdev
)
8421 struct hclge_vport
*vport
;
8424 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
8425 vport
= &hdev
->vport
[i
];
8426 vport
->used_umv_num
= 0;
8429 mutex_lock(&hdev
->vport_lock
);
8430 hdev
->share_umv_size
= hdev
->priv_umv_size
+
8431 hdev
->max_umv_size
% (hdev
->num_alloc_vport
+ 1);
8432 mutex_unlock(&hdev
->vport_lock
);
8434 hdev
->used_mc_mac_num
= 0;
8437 static bool hclge_is_umv_space_full(struct hclge_vport
*vport
, bool need_lock
)
8439 struct hclge_dev
*hdev
= vport
->back
;
8443 mutex_lock(&hdev
->vport_lock
);
8445 is_full
= (vport
->used_umv_num
>= hdev
->priv_umv_size
&&
8446 hdev
->share_umv_size
== 0);
8449 mutex_unlock(&hdev
->vport_lock
);
8454 static void hclge_update_umv_space(struct hclge_vport
*vport
, bool is_free
)
8456 struct hclge_dev
*hdev
= vport
->back
;
8459 if (vport
->used_umv_num
> hdev
->priv_umv_size
)
8460 hdev
->share_umv_size
++;
8462 if (vport
->used_umv_num
> 0)
8463 vport
->used_umv_num
--;
8465 if (vport
->used_umv_num
>= hdev
->priv_umv_size
&&
8466 hdev
->share_umv_size
> 0)
8467 hdev
->share_umv_size
--;
8468 vport
->used_umv_num
++;
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	}
}
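/* State machine summary for the MAC node lists (as implemented above):
 * a TO_DEL node that is re-added becomes ACTIVE again (it is still in
 * hardware); a TO_ADD node that is deleted before it ever reached the
 * hardware is simply freed; a TO_ADD node confirmed by the sync path
 * becomes ACTIVE; and an ACTIVE node that is deleted is flagged TO_DEL
 * for the next sync pass.
 */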
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address is never added, unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %s from mac list\n",
			format_mac_addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}
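/* The list is protected by a BH spinlock because these updates can come
 * from the set_rx_mode path; allocations therefore use GFP_ATOMIC while
 * the lock is held, and the MAC_TBL_CHANGE bit defers the actual hardware
 * programming to a later sync pass.
 */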
8567 static int hclge_add_uc_addr(struct hnae3_handle
*handle
,
8568 const unsigned char *addr
)
8570 struct hclge_vport
*vport
= hclge_get_vport(handle
);
8572 return hclge_update_mac_list(vport
, HCLGE_MAC_TO_ADD
, HCLGE_MAC_ADDR_UC
,
8576 int hclge_add_uc_addr_common(struct hclge_vport
*vport
,
8577 const unsigned char *addr
)
8579 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
8580 struct hclge_dev
*hdev
= vport
->back
;
8581 struct hclge_mac_vlan_tbl_entry_cmd req
;
8582 struct hclge_desc desc
;
8583 u16 egress_port
= 0;
8586 /* mac addr check */
8587 if (is_zero_ether_addr(addr
) ||
8588 is_broadcast_ether_addr(addr
) ||
8589 is_multicast_ether_addr(addr
)) {
8590 hnae3_format_mac_addr(format_mac_addr
, addr
);
8591 dev_err(&hdev
->pdev
->dev
,
8592 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8593 format_mac_addr
, is_zero_ether_addr(addr
),
8594 is_broadcast_ether_addr(addr
),
8595 is_multicast_ether_addr(addr
));
8599 memset(&req
, 0, sizeof(req
));
8601 hnae3_set_field(egress_port
, HCLGE_MAC_EPORT_VFID_M
,
8602 HCLGE_MAC_EPORT_VFID_S
, vport
->vport_id
);
8604 req
.egress_port
= cpu_to_le16(egress_port
);
8606 hclge_prepare_mac_addr(&req
, addr
, false);
8608 /* Lookup the mac address in the mac_vlan table, and add
8609 * it if the entry is inexistent. Repeated unicast entry
8610 * is not allowed in the mac vlan table.
8612 ret
= hclge_lookup_mac_vlan_tbl(vport
, &req
, &desc
, false);
8613 if (ret
== -ENOENT
) {
8614 mutex_lock(&hdev
->vport_lock
);
8615 if (!hclge_is_umv_space_full(vport
, false)) {
8616 ret
= hclge_add_mac_vlan_tbl(vport
, &req
, NULL
);
8618 hclge_update_umv_space(vport
, false);
8619 mutex_unlock(&hdev
->vport_lock
);
8622 mutex_unlock(&hdev
->vport_lock
);
8624 if (!(vport
->overflow_promisc_flags
& HNAE3_OVERFLOW_UPE
))
8625 dev_err(&hdev
->pdev
->dev
, "UC MAC table full(%u)\n",
8626 hdev
->priv_umv_size
);
8631 /* check if we just hit the duplicate */
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret || ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	return ret;
}
8679 static int hclge_add_mc_addr(struct hnae3_handle
*handle
,
8680 const unsigned char *addr
)
8682 struct hclge_vport
*vport
= hclge_get_vport(handle
);
8684 return hclge_update_mac_list(vport
, HCLGE_MAC_TO_ADD
, HCLGE_MAC_ADDR_MC
,
8688 int hclge_add_mc_addr_common(struct hclge_vport
*vport
,
8689 const unsigned char *addr
)
8691 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
8692 struct hclge_dev
*hdev
= vport
->back
;
8693 struct hclge_mac_vlan_tbl_entry_cmd req
;
8694 struct hclge_desc desc
[3];
8695 bool is_new_addr
= false;
8698 /* mac addr check */
8699 if (!is_multicast_ether_addr(addr
)) {
8700 hnae3_format_mac_addr(format_mac_addr
, addr
);
8701 dev_err(&hdev
->pdev
->dev
,
8702 "Add mc mac err! invalid mac:%s.\n",
8706 memset(&req
, 0, sizeof(req
));
8707 hclge_prepare_mac_addr(&req
, addr
, true);
8708 status
= hclge_lookup_mac_vlan_tbl(vport
, &req
, desc
, true);
8710 if (hnae3_ae_dev_mc_mac_mng_supported(hdev
->ae_dev
) &&
8711 hdev
->used_mc_mac_num
>=
8712 hdev
->ae_dev
->dev_specs
.mc_mac_size
)
8717 /* This mac addr do not exist, add new entry for it */
8718 memset(desc
[0].data
, 0, sizeof(desc
[0].data
));
8719 memset(desc
[1].data
, 0, sizeof(desc
[0].data
));
8720 memset(desc
[2].data
, 0, sizeof(desc
[0].data
));
8722 status
= hclge_update_desc_vfid(desc
, vport
->vport_id
, false);
8725 status
= hclge_add_mac_vlan_tbl(vport
, &req
, desc
);
8726 if (status
== -ENOSPC
)
8728 else if (!status
&& is_new_addr
)
8729 hdev
->used_mc_mac_num
++;
8734 /* if already overflow, not to print each time */
8735 if (!(vport
->overflow_promisc_flags
& HNAE3_OVERFLOW_MPE
)) {
8736 vport
->overflow_promisc_flags
|= HNAE3_OVERFLOW_MPE
;
8737 dev_err(&hdev
->pdev
->dev
, "mc mac vlan table is full\n");
8743 static int hclge_rm_mc_addr(struct hnae3_handle
*handle
,
8744 const unsigned char *addr
)
8746 struct hclge_vport
*vport
= hclge_get_vport(handle
);
8748 return hclge_update_mac_list(vport
, HCLGE_MAC_TO_DEL
, HCLGE_MAC_ADDR_MC
,
8752 int hclge_rm_mc_addr_common(struct hclge_vport
*vport
,
8753 const unsigned char *addr
)
8755 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
8756 struct hclge_dev
*hdev
= vport
->back
;
8757 struct hclge_mac_vlan_tbl_entry_cmd req
;
8758 enum hclge_comm_cmd_status status
;
8759 struct hclge_desc desc
[3];
8761 /* mac addr check */
8762 if (!is_multicast_ether_addr(addr
)) {
8763 hnae3_format_mac_addr(format_mac_addr
, addr
);
8764 dev_dbg(&hdev
->pdev
->dev
,
8765 "Remove mc mac err! invalid mac:%s.\n",
8770 memset(&req
, 0, sizeof(req
));
8771 hclge_prepare_mac_addr(&req
, addr
, true);
8772 status
= hclge_lookup_mac_vlan_tbl(vport
, &req
, desc
, true);
8774 /* This mac addr exist, remove this handle's VFID for it */
8775 status
= hclge_update_desc_vfid(desc
, vport
->vport_id
, true);
8779 if (hclge_is_all_function_id_zero(desc
)) {
8780 /* All the vfid is zero, so need to delete this entry */
8781 status
= hclge_remove_mac_vlan_tbl(vport
, &req
);
8783 hdev
->used_mc_mac_num
--;
8785 /* Not all the vfid is zero, update the vfid */
8786 status
= hclge_add_mac_vlan_tbl(vport
, &req
, desc
);
8788 } else if (status
== -ENOENT
) {
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC)
		sync = hclge_add_uc_addr_common;
	else
		sync = hclge_add_mc_addr_common;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);

			/* If one unicast mac address is existing in hardware,
			 * we need to try whether other unicast mac addresses
			 * are new addresses that can be added.
			 * Multicast mac address can be reusable, even though
			 * there is no space to add new multicast mac address,
			 * we should check whether other mac addresses are
			 * existing in hardware for reuse.
			 */
			if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
			    (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
				break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC)
		unsync = hclge_rm_uc_addr_common;
	else
		unsync = hclge_rm_mc_addr_common;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
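/* Note on the two helpers above: entries that were successfully removed
 * (or were already gone, -ENOENT) are dropped from the list and freed;
 * any other failure keeps the node in place and re-arms MAC_TBL_CHANGE
 * so that a later sync pass retries the operation.
 */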
8857 static bool hclge_sync_from_add_list(struct list_head
*add_list
,
8858 struct list_head
*mac_list
)
8860 struct hclge_mac_node
*mac_node
, *tmp
, *new_node
;
8861 bool all_added
= true;
8863 list_for_each_entry_safe(mac_node
, tmp
, add_list
, node
) {
8864 if (mac_node
->state
== HCLGE_MAC_TO_ADD
)
8867 /* if the mac address from tmp_add_list is not in the
8868 * uc/mc_mac_list, it means have received a TO_DEL request
8869 * during the time window of adding the mac address into mac
8870 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
8871 * then it will be removed at next time. else it must be TO_ADD,
8872 * this address hasn't been added into mac table,
8873 * so just remove the mac node.
8875 new_node
= hclge_find_mac_node(mac_list
, mac_node
->mac_addr
);
8877 hclge_update_mac_node(new_node
, mac_node
->state
);
8878 list_del(&mac_node
->node
);
8880 } else if (mac_node
->state
== HCLGE_MAC_ACTIVE
) {
8881 mac_node
->state
= HCLGE_MAC_TO_DEL
;
8882 list_move_tail(&mac_node
->node
, mac_list
);
8884 list_del(&mac_node
->node
);
8892 static void hclge_sync_from_del_list(struct list_head
*del_list
,
8893 struct list_head
*mac_list
)
8895 struct hclge_mac_node
*mac_node
, *tmp
, *new_node
;
8897 list_for_each_entry_safe(mac_node
, tmp
, del_list
, node
) {
8898 new_node
= hclge_find_mac_node(mac_list
, mac_node
->mac_addr
);
8900 /* If the mac addr exists in the mac list, it means
8901 * received a new TO_ADD request during the time window
8902 * of configuring the mac address. For the mac node
8903 * state is TO_ADD, and the address is already in the
8904 * in the hardware(due to delete fail), so we just need
8905 * to change the mac node state to ACTIVE.
8907 new_node
->state
= HCLGE_MAC_ACTIVE
;
8908 list_del(&mac_node
->node
);
8911 list_move_tail(&mac_node
->node
, mac_list
);
8916 static void hclge_update_overflow_flags(struct hclge_vport
*vport
,
8917 enum HCLGE_MAC_ADDR_TYPE mac_type
,
8920 if (mac_type
== HCLGE_MAC_ADDR_UC
) {
8922 vport
->overflow_promisc_flags
&= ~HNAE3_OVERFLOW_UPE
;
8923 else if (hclge_is_umv_space_full(vport
, true))
8924 vport
->overflow_promisc_flags
|= HNAE3_OVERFLOW_UPE
;
8927 vport
->overflow_promisc_flags
&= ~HNAE3_OVERFLOW_MPE
;
8929 vport
->overflow_promisc_flags
|= HNAE3_OVERFLOW_MPE
;
8933 static void hclge_sync_vport_mac_table(struct hclge_vport
*vport
,
8934 enum HCLGE_MAC_ADDR_TYPE mac_type
)
8936 struct hclge_mac_node
*mac_node
, *tmp
, *new_node
;
8937 struct list_head tmp_add_list
, tmp_del_list
;
8938 struct list_head
*list
;
8941 INIT_LIST_HEAD(&tmp_add_list
);
8942 INIT_LIST_HEAD(&tmp_del_list
);
8944 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8945 * we can add/delete these mac addr outside the spin lock
8947 list
= (mac_type
== HCLGE_MAC_ADDR_UC
) ?
8948 &vport
->uc_mac_list
: &vport
->mc_mac_list
;
8950 spin_lock_bh(&vport
->mac_list_lock
);
8952 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
8953 switch (mac_node
->state
) {
8954 case HCLGE_MAC_TO_DEL
:
8955 list_move_tail(&mac_node
->node
, &tmp_del_list
);
8957 case HCLGE_MAC_TO_ADD
:
8958 new_node
= kzalloc(sizeof(*new_node
), GFP_ATOMIC
);
8961 ether_addr_copy(new_node
->mac_addr
, mac_node
->mac_addr
);
8962 new_node
->state
= mac_node
->state
;
8963 list_add_tail(&new_node
->node
, &tmp_add_list
);
8971 spin_unlock_bh(&vport
->mac_list_lock
);
8973 /* delete first, in order to get max mac table space for adding */
8974 hclge_unsync_vport_mac_list(vport
, &tmp_del_list
, mac_type
);
8975 hclge_sync_vport_mac_list(vport
, &tmp_add_list
, mac_type
);
8977 /* if some mac addresses were added/deleted fail, move back to the
8978 * mac_list, and retry at next time.
8980 spin_lock_bh(&vport
->mac_list_lock
);
8982 hclge_sync_from_del_list(&tmp_del_list
, list
);
8983 all_added
= hclge_sync_from_add_list(&tmp_add_list
, list
);
8985 spin_unlock_bh(&vport
->mac_list_lock
);
8987 hclge_update_overflow_flags(vport
, mac_type
, all_added
);
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}
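/* hclge_sync_mac_table() is the batch entry point that walks every vport
 * and flushes pending unicast/multicast address changes into hardware;
 * the scheduling site (the periodic service task) is outside this excerpt.
 */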
9018 static void hclge_build_del_list(struct list_head
*list
,
9020 struct list_head
*tmp_del_list
)
9022 struct hclge_mac_node
*mac_cfg
, *tmp
;
9024 list_for_each_entry_safe(mac_cfg
, tmp
, list
, node
) {
9025 switch (mac_cfg
->state
) {
9026 case HCLGE_MAC_TO_DEL
:
9027 case HCLGE_MAC_ACTIVE
:
9028 list_move_tail(&mac_cfg
->node
, tmp_del_list
);
9030 case HCLGE_MAC_TO_ADD
:
9032 list_del(&mac_cfg
->node
);
9040 static void hclge_unsync_del_list(struct hclge_vport
*vport
,
9041 int (*unsync
)(struct hclge_vport
*vport
,
9042 const unsigned char *addr
),
9044 struct list_head
*tmp_del_list
)
9046 struct hclge_mac_node
*mac_cfg
, *tmp
;
9049 list_for_each_entry_safe(mac_cfg
, tmp
, tmp_del_list
, node
) {
9050 ret
= unsync(vport
, mac_cfg
->mac_addr
);
9051 if (!ret
|| ret
== -ENOENT
) {
9052 /* clear all mac addr from hardware, but remain these
9053 * mac addr in the mac list, and restore them after
9054 * vf reset finished.
9057 mac_cfg
->state
== HCLGE_MAC_ACTIVE
) {
9058 mac_cfg
->state
= HCLGE_MAC_TO_ADD
;
9060 list_del(&mac_cfg
->node
);
9063 } else if (is_del_list
) {
9064 mac_cfg
->state
= HCLGE_MAC_TO_DEL
;
9069 void hclge_rm_vport_all_mac_table(struct hclge_vport
*vport
, bool is_del_list
,
9070 enum HCLGE_MAC_ADDR_TYPE mac_type
)
9072 int (*unsync
)(struct hclge_vport
*vport
, const unsigned char *addr
);
9073 struct hclge_dev
*hdev
= vport
->back
;
9074 struct list_head tmp_del_list
, *list
;
9076 if (mac_type
== HCLGE_MAC_ADDR_UC
) {
9077 list
= &vport
->uc_mac_list
;
9078 unsync
= hclge_rm_uc_addr_common
;
9080 list
= &vport
->mc_mac_list
;
9081 unsync
= hclge_rm_mc_addr_common
;
9084 INIT_LIST_HEAD(&tmp_del_list
);
9087 set_bit(vport
->vport_id
, hdev
->vport_config_block
);
9089 spin_lock_bh(&vport
->mac_list_lock
);
9091 hclge_build_del_list(list
, is_del_list
, &tmp_del_list
);
9093 spin_unlock_bh(&vport
->mac_list_lock
);
9095 hclge_unsync_del_list(vport
, unsync
, is_del_list
, &tmp_del_list
);
9097 spin_lock_bh(&vport
->mac_list_lock
);
9099 hclge_sync_from_del_list(&tmp_del_list
, list
);
9101 spin_unlock_bh(&vport
->mac_list_lock
);
9104 /* remove all mac address when uninitailize */
9105 static void hclge_uninit_vport_mac_list(struct hclge_vport
*vport
,
9106 enum HCLGE_MAC_ADDR_TYPE mac_type
)
9108 struct hclge_mac_node
*mac_node
, *tmp
;
9109 struct hclge_dev
*hdev
= vport
->back
;
9110 struct list_head tmp_del_list
, *list
;
9112 INIT_LIST_HEAD(&tmp_del_list
);
9114 list
= (mac_type
== HCLGE_MAC_ADDR_UC
) ?
9115 &vport
->uc_mac_list
: &vport
->mc_mac_list
;
9117 spin_lock_bh(&vport
->mac_list_lock
);
9119 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
9120 switch (mac_node
->state
) {
9121 case HCLGE_MAC_TO_DEL
:
9122 case HCLGE_MAC_ACTIVE
:
9123 list_move_tail(&mac_node
->node
, &tmp_del_list
);
9125 case HCLGE_MAC_TO_ADD
:
9126 list_del(&mac_node
->node
);
9132 spin_unlock_bh(&vport
->mac_list_lock
);
9134 hclge_unsync_vport_mac_list(vport
, &tmp_del_list
, mac_type
);
9136 if (!list_empty(&tmp_del_list
))
9137 dev_warn(&hdev
->pdev
->dev
,
9138 "uninit %s mac list for vport %u not completely.\n",
9139 mac_type
== HCLGE_MAC_ADDR_UC
? "uc" : "mc",
9142 list_for_each_entry_safe(mac_node
, tmp
, &tmp_del_list
, node
) {
9143 list_del(&mac_node
->node
);
9148 static void hclge_uninit_mac_table(struct hclge_dev
*hdev
)
9150 struct hclge_vport
*vport
;
9153 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
9154 vport
= &hdev
->vport
[i
];
9155 hclge_uninit_vport_mac_list(vport
, HCLGE_MAC_ADDR_UC
);
9156 hclge_uninit_vport_mac_list(vport
, HCLGE_MAC_ADDR_MC
);
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
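/* Firmware response codes for the manager-table add: 0 (added) and
 * 1 (already present) are both treated as success, while table overflow,
 * key conflict, or anything undefined is mapped to -EIO after logging.
 */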
9202 static int hclge_set_vf_mac(struct hnae3_handle
*handle
, int vf
,
9205 struct hclge_vport
*vport
= hclge_get_vport(handle
);
9206 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
9207 struct hclge_dev
*hdev
= vport
->back
;
9209 vport
= hclge_get_vf_vport(hdev
, vf
);
9213 hnae3_format_mac_addr(format_mac_addr
, mac_addr
);
9214 if (ether_addr_equal(mac_addr
, vport
->vf_info
.mac
)) {
9215 dev_info(&hdev
->pdev
->dev
,
9216 "Specified MAC(=%s) is same as before, no change committed!\n",
9221 ether_addr_copy(vport
->vf_info
.mac
, mac_addr
);
9223 /* there is a timewindow for PF to know VF unalive, it may
9224 * cause send mailbox fail, but it doesn't matter, VF will
9225 * query it when reinit.
9227 if (test_bit(HCLGE_VPORT_STATE_ALIVE
, &vport
->state
)) {
9228 dev_info(&hdev
->pdev
->dev
,
9229 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
9230 vf
, format_mac_addr
);
9231 (void)hclge_inform_reset_assert_to_vf(vport
);
9235 dev_info(&hdev
->pdev
->dev
,
9236 "MAC of VF %d has been set to %s, will be active after VF reset\n",
9237 vf
, format_mac_addr
);
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
9292 int hclge_update_mac_node_for_dev_addr(struct hclge_vport
*vport
,
9293 const u8
*old_addr
, const u8
*new_addr
)
9295 struct list_head
*list
= &vport
->uc_mac_list
;
9296 struct hclge_mac_node
*old_node
, *new_node
;
9298 new_node
= hclge_find_mac_node(list
, new_addr
);
9300 new_node
= kzalloc(sizeof(*new_node
), GFP_ATOMIC
);
9304 new_node
->state
= HCLGE_MAC_TO_ADD
;
9305 ether_addr_copy(new_node
->mac_addr
, new_addr
);
9306 list_add(&new_node
->node
, list
);
9308 if (new_node
->state
== HCLGE_MAC_TO_DEL
)
9309 new_node
->state
= HCLGE_MAC_ACTIVE
;
9311 /* make sure the new addr is in the list head, avoid dev
9312 * addr may be not re-added into mac table for the umv space
9313 * limitation after global/imp reset which will clear mac
9314 * table by hardware.
9316 list_move(&new_node
->node
, list
);
9319 if (old_addr
&& !ether_addr_equal(old_addr
, new_addr
)) {
9320 old_node
= hclge_find_mac_node(list
, old_addr
);
9322 if (old_node
->state
== HCLGE_MAC_TO_ADD
) {
9323 list_del(&old_node
->node
);
9326 old_node
->state
= HCLGE_MAC_TO_DEL
;
9331 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE
, &vport
->state
);
9336 static int hclge_set_mac_addr(struct hnae3_handle
*handle
, const void *p
,
9339 const unsigned char *new_addr
= (const unsigned char *)p
;
9340 struct hclge_vport
*vport
= hclge_get_vport(handle
);
9341 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
9342 struct hclge_dev
*hdev
= vport
->back
;
9343 unsigned char *old_addr
= NULL
;
9346 /* mac addr check */
9347 if (is_zero_ether_addr(new_addr
) ||
9348 is_broadcast_ether_addr(new_addr
) ||
9349 is_multicast_ether_addr(new_addr
)) {
9350 hnae3_format_mac_addr(format_mac_addr
, new_addr
);
9351 dev_err(&hdev
->pdev
->dev
,
9352 "change uc mac err! invalid mac: %s.\n",
9357 ret
= hclge_pause_addr_cfg(hdev
, new_addr
);
9359 dev_err(&hdev
->pdev
->dev
,
9360 "failed to configure mac pause address, ret = %d\n",
9366 old_addr
= hdev
->hw
.mac
.mac_addr
;
9368 spin_lock_bh(&vport
->mac_list_lock
);
9369 ret
= hclge_update_mac_node_for_dev_addr(vport
, old_addr
, new_addr
);
9371 hnae3_format_mac_addr(format_mac_addr
, new_addr
);
9372 dev_err(&hdev
->pdev
->dev
,
9373 "failed to change the mac addr:%s, ret = %d\n",
9374 format_mac_addr
, ret
);
9375 spin_unlock_bh(&vport
->mac_list_lock
);
9378 hclge_pause_addr_cfg(hdev
, old_addr
);
9382 /* we must update dev addr with spin lock protect, preventing dev addr
9383 * being removed by set_rx_mode path.
9385 ether_addr_copy(hdev
->hw
.mac
.mac_addr
, new_addr
);
9386 spin_unlock_bh(&vport
->mac_list_lock
);
9388 hclge_task_schedule(hdev
, 0);
static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!hnae3_dev_phy_imp_supported(hdev))
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hdev->hw.mac.phy_addr;
		/* this command reads phy id and register at the same time */
		fallthrough;
	case SIOCGMIIREG:
		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
		return 0;

	case SIOCSMIIREG:
		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
	default:
		return -EOPNOTSUPP;
	}
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return hclge_ptp_get_cfg(hdev, ifr);
	case SIOCSHWTSTAMP:
		return hclge_ptp_set_cfg(hdev, ifr);
	default:
		if (!hdev->hw.mac.phydev)
			return hclge_mii_ioctl(hdev, ifr, cmd);
	}

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
					     bool bypass_en)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;
	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
		      bypass_en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);

	return ret;
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
			vf_id, ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_comm_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
			vf_id, ret);

	return ret;
}
9490 static int hclge_set_vport_vlan_filter(struct hclge_vport
*vport
, bool enable
)
9492 struct hclge_dev
*hdev
= vport
->back
;
9493 struct hnae3_ae_dev
*ae_dev
= hdev
->ae_dev
;
9496 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
9497 return hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_VF
,
9498 HCLGE_FILTER_FE_EGRESS_V1_B
,
9499 enable
, vport
->vport_id
);
9501 ret
= hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_VF
,
9502 HCLGE_FILTER_FE_EGRESS
, enable
,
9507 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B
, ae_dev
->caps
)) {
9508 ret
= hclge_set_port_vlan_filter_bypass(hdev
, vport
->vport_id
,
9510 } else if (!vport
->vport_id
) {
9511 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, ae_dev
->caps
))
9514 ret
= hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_PORT
,
9515 HCLGE_FILTER_FE_INGRESS
,
9522 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport
*vport
)
9524 struct hnae3_handle
*handle
= &vport
->nic
;
9525 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
9526 struct hclge_dev
*hdev
= vport
->back
;
9528 if (vport
->vport_id
) {
9529 if (vport
->port_base_vlan_cfg
.state
!=
9530 HNAE3_PORT_BASE_VLAN_DISABLE
)
9533 if (vport
->vf_info
.trusted
&& vport
->vf_info
.request_uc_en
)
9535 } else if (handle
->netdev_flags
& HNAE3_USER_UPE
) {
9539 if (!vport
->req_vlan_fltr_en
)
9542 /* compatible with former device, always enable vlan filter */
9543 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, hdev
->ae_dev
->caps
))
9546 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
)
9547 if (vlan
->vlan_id
!= 0)
9553 int hclge_enable_vport_vlan_filter(struct hclge_vport
*vport
, bool request_en
)
9555 struct hclge_dev
*hdev
= vport
->back
;
9559 mutex_lock(&hdev
->vport_lock
);
9561 vport
->req_vlan_fltr_en
= request_en
;
9563 need_en
= hclge_need_enable_vport_vlan_filter(vport
);
9564 if (need_en
== vport
->cur_vlan_fltr_en
) {
9565 mutex_unlock(&hdev
->vport_lock
);
9569 ret
= hclge_set_vport_vlan_filter(vport
, need_en
);
9571 mutex_unlock(&hdev
->vport_lock
);
9575 vport
->cur_vlan_fltr_en
= need_en
;
9577 mutex_unlock(&hdev
->vport_lock
);
9582 static int hclge_enable_vlan_filter(struct hnae3_handle
*handle
, bool enable
)
9584 struct hclge_vport
*vport
= hclge_get_vport(handle
);
9586 return hclge_enable_vport_vlan_filter(vport
, enable
);
9589 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev
*hdev
, u16 vfid
,
9590 bool is_kill
, u16 vlan
,
9591 struct hclge_desc
*desc
)
9593 struct hclge_vlan_filter_vf_cfg_cmd
*req0
;
9594 struct hclge_vlan_filter_vf_cfg_cmd
*req1
;
9599 hclge_cmd_setup_basic_desc(&desc
[0],
9600 HCLGE_OPC_VLAN_FILTER_VF_CFG
, false);
9601 hclge_cmd_setup_basic_desc(&desc
[1],
9602 HCLGE_OPC_VLAN_FILTER_VF_CFG
, false);
9604 desc
[0].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
9606 vf_byte_off
= vfid
/ 8;
9607 vf_byte_val
= 1 << (vfid
% 8);
9609 req0
= (struct hclge_vlan_filter_vf_cfg_cmd
*)desc
[0].data
;
9610 req1
= (struct hclge_vlan_filter_vf_cfg_cmd
*)desc
[1].data
;
9612 req0
->vlan_id
= cpu_to_le16(vlan
);
9613 req0
->vlan_cfg
= is_kill
;
9615 if (vf_byte_off
< HCLGE_MAX_VF_BYTES
)
9616 req0
->vf_bitmap
[vf_byte_off
] = vf_byte_val
;
9618 req1
->vf_bitmap
[vf_byte_off
- HCLGE_MAX_VF_BYTES
] = vf_byte_val
;
9620 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
9622 dev_err(&hdev
->pdev
->dev
,
9623 "Send vf vlan command fail, ret =%d.\n",
9631 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev
*hdev
, u16 vfid
,
9632 bool is_kill
, struct hclge_desc
*desc
)
9634 struct hclge_vlan_filter_vf_cfg_cmd
*req
;
9636 req
= (struct hclge_vlan_filter_vf_cfg_cmd
*)desc
[0].data
;
9639 #define HCLGE_VF_VLAN_NO_ENTRY 2
9640 if (!req
->resp_code
|| req
->resp_code
== 1)
9643 if (req
->resp_code
== HCLGE_VF_VLAN_NO_ENTRY
) {
9644 set_bit(vfid
, hdev
->vf_vlan_full
);
9645 dev_warn(&hdev
->pdev
->dev
,
9646 "vf vlan table is full, vf vlan filter is disabled\n");
9650 dev_err(&hdev
->pdev
->dev
,
9651 "Add vf vlan filter fail, ret =%u.\n",
9654 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9655 if (!req
->resp_code
)
9658 /* vf vlan filter is disabled when vf vlan table is full,
9659 * then new vlan id will not be added into vf vlan table.
9660 * Just return 0 without warning, avoid massive verbose
9661 * print logs when unload.
9663 if (req
->resp_code
== HCLGE_VF_VLAN_DEL_NO_FOUND
)
9666 dev_err(&hdev
->pdev
->dev
,
9667 "Kill vf vlan filter fail, ret =%u.\n",
9674 static int hclge_set_vf_vlan_common(struct hclge_dev
*hdev
, u16 vfid
,
9675 bool is_kill
, u16 vlan
)
9677 struct hclge_vport
*vport
= &hdev
->vport
[vfid
];
9678 struct hclge_desc desc
[2];
9681 /* if vf vlan table is full, firmware will close vf vlan filter, it
9682 * is unable and unnecessary to add new vlan id to vf vlan filter.
9683 * If spoof check is enable, and vf vlan is full, it shouldn't add
9684 * new vlan, because tx packets with these vlan id will be dropped.
9686 if (test_bit(vfid
, hdev
->vf_vlan_full
) && !is_kill
) {
9687 if (vport
->vf_info
.spoofchk
&& vlan
) {
9688 dev_err(&hdev
->pdev
->dev
,
9689 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9695 ret
= hclge_set_vf_vlan_filter_cmd(hdev
, vfid
, is_kill
, vlan
, desc
);
9699 return hclge_check_vf_vlan_cmd_status(hdev
, vfid
, is_kill
, desc
);
9702 static int hclge_set_port_vlan_filter(struct hclge_dev
*hdev
, __be16 proto
,
9703 u16 vlan_id
, bool is_kill
)
9705 struct hclge_vlan_filter_pf_cfg_cmd
*req
;
9706 struct hclge_desc desc
;
9707 u8 vlan_offset_byte_val
;
9708 u8 vlan_offset_byte
;
9712 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_VLAN_FILTER_PF_CFG
, false);
9714 vlan_offset_160
= vlan_id
/ HCLGE_VLAN_ID_OFFSET_STEP
;
9715 vlan_offset_byte
= (vlan_id
% HCLGE_VLAN_ID_OFFSET_STEP
) /
9716 HCLGE_VLAN_BYTE_SIZE
;
9717 vlan_offset_byte_val
= 1 << (vlan_id
% HCLGE_VLAN_BYTE_SIZE
);
9719 req
= (struct hclge_vlan_filter_pf_cfg_cmd
*)desc
.data
;
9720 req
->vlan_offset
= vlan_offset_160
;
9721 req
->vlan_cfg
= is_kill
;
9722 req
->vlan_offset_bitmap
[vlan_offset_byte
] = vlan_offset_byte_val
;
9724 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
9726 dev_err(&hdev
->pdev
->dev
,
9727 "port vlan command, send fail, ret =%d.\n", ret
);
static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
					u16 vlan_id, bool is_kill)
{
	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return false;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_warn(&hdev->pdev->dev,
			 "Add port vlan failed, vport %u is already in vlan %u\n",
			 vport_id, vlan_id);
		return false;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_warn(&hdev->pdev->dev,
			 "Delete port vlan failed, vport %u is not in vlan %u\n",
			 vport_id, vlan_id);
		return false;
	}

	return true;
}

static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	if (vlan_id >= VLAN_N_VID)
		return -EINVAL;

	/* set the vf vlan filter first, then the port vlan filter */
	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
		return 0;

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
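/* The per-VLAN vport bitmap doubles as a reference count for the port
 * table: the port-level filter entry is only programmed when the first
 * vport joins a VLAN (vport_num == 1 on add) and only torn down when the
 * last one leaves (vport_num == 0 on kill).
 */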
9791 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport
*vport
)
9793 struct hclge_tx_vtag_cfg
*vcfg
= &vport
->txvlan_cfg
;
9794 struct hclge_vport_vtag_tx_cfg_cmd
*req
;
9795 struct hclge_dev
*hdev
= vport
->back
;
9796 struct hclge_desc desc
;
9800 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_VLAN_PORT_TX_CFG
, false);
9802 req
= (struct hclge_vport_vtag_tx_cfg_cmd
*)desc
.data
;
9803 req
->def_vlan_tag1
= cpu_to_le16(vcfg
->default_tag1
);
9804 req
->def_vlan_tag2
= cpu_to_le16(vcfg
->default_tag2
);
9805 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_ACCEPT_TAG1_B
,
9806 vcfg
->accept_tag1
? 1 : 0);
9807 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_ACCEPT_UNTAG1_B
,
9808 vcfg
->accept_untag1
? 1 : 0);
9809 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_ACCEPT_TAG2_B
,
9810 vcfg
->accept_tag2
? 1 : 0);
9811 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_ACCEPT_UNTAG2_B
,
9812 vcfg
->accept_untag2
? 1 : 0);
9813 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_PORT_INS_TAG1_EN_B
,
9814 vcfg
->insert_tag1_en
? 1 : 0);
9815 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_PORT_INS_TAG2_EN_B
,
9816 vcfg
->insert_tag2_en
? 1 : 0);
9817 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_TAG_SHIFT_MODE_EN_B
,
9818 vcfg
->tag_shift_mode_en
? 1 : 0);
9819 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_CFG_NIC_ROCE_SEL_B
, 0);
9821 req
->vf_offset
= vport
->vport_id
/ HCLGE_VF_NUM_PER_CMD
;
9822 bmap_index
= vport
->vport_id
% HCLGE_VF_NUM_PER_CMD
/
9823 HCLGE_VF_NUM_PER_BYTE
;
9824 req
->vf_bitmap
[bmap_index
] =
9825 1U << (vport
->vport_id
% HCLGE_VF_NUM_PER_BYTE
);
9827 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
9829 dev_err(&hdev
->pdev
->dev
,
9830 "Send port txvlan cfg command fail, ret =%d\n",
9836 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport
*vport
)
9838 struct hclge_rx_vtag_cfg
*vcfg
= &vport
->rxvlan_cfg
;
9839 struct hclge_vport_vtag_rx_cfg_cmd
*req
;
9840 struct hclge_dev
*hdev
= vport
->back
;
9841 struct hclge_desc desc
;
9845 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_VLAN_PORT_RX_CFG
, false);
9847 req
= (struct hclge_vport_vtag_rx_cfg_cmd
*)desc
.data
;
9848 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_REM_TAG1_EN_B
,
9849 vcfg
->strip_tag1_en
? 1 : 0);
9850 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_REM_TAG2_EN_B
,
9851 vcfg
->strip_tag2_en
? 1 : 0);
9852 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_SHOW_TAG1_EN_B
,
9853 vcfg
->vlan1_vlan_prionly
? 1 : 0);
9854 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_SHOW_TAG2_EN_B
,
9855 vcfg
->vlan2_vlan_prionly
? 1 : 0);
9856 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_DISCARD_TAG1_EN_B
,
9857 vcfg
->strip_tag1_discard_en
? 1 : 0);
9858 hnae3_set_bit(req
->vport_vlan_cfg
, HCLGE_DISCARD_TAG2_EN_B
,
9859 vcfg
->strip_tag2_discard_en
? 1 : 0);
9861 req
->vf_offset
= vport
->vport_id
/ HCLGE_VF_NUM_PER_CMD
;
9862 bmap_index
= vport
->vport_id
% HCLGE_VF_NUM_PER_CMD
/
9863 HCLGE_VF_NUM_PER_BYTE
;
9864 req
->vf_bitmap
[bmap_index
] =
9865 1U << (vport
->vport_id
% HCLGE_VF_NUM_PER_BYTE
);
9867 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
9869 dev_err(&hdev
->pdev
->dev
,
9870 "Send port rxvlan cfg command fail, ret =%d\n",
9876 static int hclge_vlan_offload_cfg(struct hclge_vport
*vport
,
9877 u16 port_base_vlan_state
,
9878 u16 vlan_tag
, u8 qos
)
9882 if (port_base_vlan_state
== HNAE3_PORT_BASE_VLAN_DISABLE
) {
9883 vport
->txvlan_cfg
.accept_tag1
= true;
9884 vport
->txvlan_cfg
.insert_tag1_en
= false;
9885 vport
->txvlan_cfg
.default_tag1
= 0;
9887 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(vport
->nic
.pdev
);
9889 vport
->txvlan_cfg
.accept_tag1
=
9890 ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V3
;
9891 vport
->txvlan_cfg
.insert_tag1_en
= true;
9892 vport
->txvlan_cfg
.default_tag1
= (qos
<< VLAN_PRIO_SHIFT
) |
9896 vport
->txvlan_cfg
.accept_untag1
= true;
9898 /* accept_tag2 and accept_untag2 are not supported on
9899 * pdev revision(0x20), new revision support them,
9900 * this two fields can not be configured by user.
9902 vport
->txvlan_cfg
.accept_tag2
= true;
9903 vport
->txvlan_cfg
.accept_untag2
= true;
9904 vport
->txvlan_cfg
.insert_tag2_en
= false;
9905 vport
->txvlan_cfg
.default_tag2
= 0;
9906 vport
->txvlan_cfg
.tag_shift_mode_en
= true;
9908 if (port_base_vlan_state
== HNAE3_PORT_BASE_VLAN_DISABLE
) {
9909 vport
->rxvlan_cfg
.strip_tag1_en
= false;
9910 vport
->rxvlan_cfg
.strip_tag2_en
=
9911 vport
->rxvlan_cfg
.rx_vlan_offload_en
;
9912 vport
->rxvlan_cfg
.strip_tag2_discard_en
= false;
9914 vport
->rxvlan_cfg
.strip_tag1_en
=
9915 vport
->rxvlan_cfg
.rx_vlan_offload_en
;
9916 vport
->rxvlan_cfg
.strip_tag2_en
= true;
9917 vport
->rxvlan_cfg
.strip_tag2_discard_en
= true;
9920 vport
->rxvlan_cfg
.strip_tag1_discard_en
= false;
9921 vport
->rxvlan_cfg
.vlan1_vlan_prionly
= false;
9922 vport
->rxvlan_cfg
.vlan2_vlan_prionly
= false;
9924 ret
= hclge_set_vlan_tx_offload_cfg(vport
);
9928 return hclge_set_vlan_rx_offload_cfg(vport
);
9931 static int hclge_set_vlan_protocol_type(struct hclge_dev
*hdev
)
9933 struct hclge_rx_vlan_type_cfg_cmd
*rx_req
;
9934 struct hclge_tx_vlan_type_cfg_cmd
*tx_req
;
9935 struct hclge_desc desc
;
9938 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_TYPE_ID
, false);
9939 rx_req
= (struct hclge_rx_vlan_type_cfg_cmd
*)desc
.data
;
9940 rx_req
->ot_fst_vlan_type
=
9941 cpu_to_le16(hdev
->vlan_type_cfg
.rx_ot_fst_vlan_type
);
9942 rx_req
->ot_sec_vlan_type
=
9943 cpu_to_le16(hdev
->vlan_type_cfg
.rx_ot_sec_vlan_type
);
9944 rx_req
->in_fst_vlan_type
=
9945 cpu_to_le16(hdev
->vlan_type_cfg
.rx_in_fst_vlan_type
);
9946 rx_req
->in_sec_vlan_type
=
9947 cpu_to_le16(hdev
->vlan_type_cfg
.rx_in_sec_vlan_type
);
9949 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
9951 dev_err(&hdev
->pdev
->dev
,
9952 "Send rxvlan protocol type command fail, ret =%d\n",
9957 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_INSERT
, false);
9959 tx_req
= (struct hclge_tx_vlan_type_cfg_cmd
*)desc
.data
;
9960 tx_req
->ot_vlan_type
= cpu_to_le16(hdev
->vlan_type_cfg
.tx_ot_vlan_type
);
9961 tx_req
->in_vlan_type
= cpu_to_le16(hdev
->vlan_type_cfg
.tx_in_vlan_type
);
9963 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
9965 dev_err(&hdev
->pdev
->dev
,
9966 "Send txvlan protocol type command fail, ret =%d\n",
9972 static int hclge_init_vlan_filter(struct hclge_dev
*hdev
)
9974 struct hclge_vport
*vport
;
9979 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
9980 return hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_VF
,
9981 HCLGE_FILTER_FE_EGRESS_V1_B
,
9984 /* for revision 0x21, vf vlan filter is per function */
9985 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
9986 vport
= &hdev
->vport
[i
];
9987 ret
= hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_VF
,
9988 HCLGE_FILTER_FE_EGRESS
, true,
9992 vport
->cur_vlan_fltr_en
= true;
9995 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, hdev
->ae_dev
->caps
) &&
9996 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B
, hdev
->ae_dev
->caps
))
9999 return hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_PORT
,
10000 HCLGE_FILTER_FE_INGRESS
, enable
, 0);
static int hclge_init_vlan_type(struct hclge_dev *hdev)
{
	hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;

	return hclge_set_vlan_protocol_type(hdev);
}

static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
{
	struct hclge_port_base_vlan_config *cfg;
	struct hclge_vport *vport;
	int ret;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		cfg = &vport->port_base_vlan_cfg;

		ret = hclge_vlan_offload_cfg(vport, cfg->state,
					     cfg->vlan_info.vlan_tag,
					     cfg->vlan_info.qos);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	int ret;

	ret = hclge_init_vlan_filter(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_type(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vport_vlan_offload(hdev);
	if (ret)
		return ret;

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
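/* Initialization order above: enable the filter engines, program the
 * 802.1Q protocol types used for matching and insertion, replay each
 * vport's port-based VLAN offload configuration, and finally add VLAN 0
 * to the filter so untagged and priority-tagged traffic keeps flowing.
 */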
10055 static void hclge_add_vport_vlan_table(struct hclge_vport
*vport
, u16 vlan_id
,
10056 bool writen_to_tbl
)
10058 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
10059 struct hclge_dev
*hdev
= vport
->back
;
10061 mutex_lock(&hdev
->vport_lock
);
10063 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
) {
10064 if (vlan
->vlan_id
== vlan_id
) {
10065 mutex_unlock(&hdev
->vport_lock
);
10070 vlan
= kzalloc(sizeof(*vlan
), GFP_KERNEL
);
10072 mutex_unlock(&hdev
->vport_lock
);
10076 vlan
->hd_tbl_status
= writen_to_tbl
;
10077 vlan
->vlan_id
= vlan_id
;
10079 list_add_tail(&vlan
->node
, &vport
->vlan_list
);
10080 mutex_unlock(&hdev
->vport_lock
);
10083 static int hclge_add_vport_all_vlan_table(struct hclge_vport
*vport
)
10085 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
10086 struct hclge_dev
*hdev
= vport
->back
;
10089 mutex_lock(&hdev
->vport_lock
);
10091 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
) {
10092 if (!vlan
->hd_tbl_status
) {
10093 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10095 vlan
->vlan_id
, false);
10097 dev_err(&hdev
->pdev
->dev
,
10098 "restore vport vlan list failed, ret=%d\n",
10101 mutex_unlock(&hdev
->vport_lock
);
10105 vlan
->hd_tbl_status
= true;
10108 mutex_unlock(&hdev
->vport_lock
);
10113 static void hclge_rm_vport_vlan_table(struct hclge_vport
*vport
, u16 vlan_id
,
10116 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
10117 struct hclge_dev
*hdev
= vport
->back
;
10119 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
) {
10120 if (vlan
->vlan_id
== vlan_id
) {
10121 if (is_write_tbl
&& vlan
->hd_tbl_status
)
10122 hclge_set_vlan_filter_hw(hdev
,
10123 htons(ETH_P_8021Q
),
10128 list_del(&vlan
->node
);
10135 void hclge_rm_vport_all_vlan_table(struct hclge_vport
*vport
, bool is_del_list
)
10137 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
10138 struct hclge_dev
*hdev
= vport
->back
;
10140 mutex_lock(&hdev
->vport_lock
);
10142 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
) {
10143 if (vlan
->hd_tbl_status
)
10144 hclge_set_vlan_filter_hw(hdev
,
10145 htons(ETH_P_8021Q
),
10150 vlan
->hd_tbl_status
= false;
10152 list_del(&vlan
->node
);
10156 clear_bit(vport
->vport_id
, hdev
->vf_vlan_full
);
10157 mutex_unlock(&hdev
->vport_lock
);
10160 void hclge_uninit_vport_vlan_table(struct hclge_dev
*hdev
)
10162 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
10163 struct hclge_vport
*vport
;
10166 mutex_lock(&hdev
->vport_lock
);
10168 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
10169 vport
= &hdev
->vport
[i
];
10170 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
) {
10171 list_del(&vlan
->node
);
10176 mutex_unlock(&hdev
->vport_lock
);
10179 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev
*hdev
)
10181 struct hclge_vlan_info
*vlan_info
;
10182 struct hclge_vport
*vport
;
10189 /* PF should restore all vfs port base vlan */
10190 for (vf_id
= 0; vf_id
< hdev
->num_alloc_vfs
; vf_id
++) {
10191 vport
= &hdev
->vport
[vf_id
+ HCLGE_VF_VPORT_START_NUM
];
10192 vlan_info
= vport
->port_base_vlan_cfg
.tbl_sta
?
10193 &vport
->port_base_vlan_cfg
.vlan_info
:
10194 &vport
->port_base_vlan_cfg
.old_vlan_info
;
10196 vlan_id
= vlan_info
->vlan_tag
;
10197 vlan_proto
= vlan_info
->vlan_proto
;
10198 state
= vport
->port_base_vlan_cfg
.state
;
10200 if (state
!= HNAE3_PORT_BASE_VLAN_DISABLE
) {
10201 clear_bit(vport
->vport_id
, hdev
->vlan_table
[vlan_id
]);
10202 ret
= hclge_set_vlan_filter_hw(hdev
, htons(vlan_proto
),
10205 vport
->port_base_vlan_cfg
.tbl_sta
= ret
== 0;
10210 void hclge_restore_vport_vlan_table(struct hclge_vport
*vport
)
10212 struct hclge_vport_vlan_cfg
*vlan
, *tmp
;
10213 struct hclge_dev
*hdev
= vport
->back
;
10216 mutex_lock(&hdev
->vport_lock
);
10218 if (vport
->port_base_vlan_cfg
.state
== HNAE3_PORT_BASE_VLAN_DISABLE
) {
10219 list_for_each_entry_safe(vlan
, tmp
, &vport
->vlan_list
, node
) {
10220 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10222 vlan
->vlan_id
, false);
10225 vlan
->hd_tbl_status
= true;
10229 mutex_unlock(&hdev
->vport_lock
);
/* For global reset and IMP reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset completes. Further,
 * mac addresses with state TO_DEL or DEL_FAIL do not need to be
 * restored after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}

static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_port_base_vlan_config(hdev);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
}
10305 static int hclge_update_vlan_filter_entries(struct hclge_vport
*vport
,
10306 u16 port_base_vlan_state
,
10307 struct hclge_vlan_info
*new_info
,
10308 struct hclge_vlan_info
*old_info
)
10310 struct hclge_dev
*hdev
= vport
->back
;
10313 if (port_base_vlan_state
== HNAE3_PORT_BASE_VLAN_ENABLE
) {
10314 hclge_rm_vport_all_vlan_table(vport
, false);
10315 /* force clear VLAN 0 */
10316 ret
= hclge_set_vf_vlan_common(hdev
, vport
->vport_id
, true, 0);
10319 return hclge_set_vlan_filter_hw(hdev
,
10320 htons(new_info
->vlan_proto
),
10322 new_info
->vlan_tag
,
10326 vport
->port_base_vlan_cfg
.tbl_sta
= false;
10328 /* force add VLAN 0 */
10329 ret
= hclge_set_vf_vlan_common(hdev
, vport
->vport_id
, false, 0);
10333 ret
= hclge_set_vlan_filter_hw(hdev
, htons(old_info
->vlan_proto
),
10334 vport
->vport_id
, old_info
->vlan_tag
,
10339 return hclge_add_vport_all_vlan_table(vport
);
10342 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info
*new_cfg
,
10343 const struct hclge_vlan_info
*old_cfg
)
10345 if (new_cfg
->vlan_tag
!= old_cfg
->vlan_tag
)
10348 if (new_cfg
->vlan_tag
== 0 && (new_cfg
->qos
== 0 || old_cfg
->qos
== 0))
10354 static int hclge_modify_port_base_vlan_tag(struct hclge_vport
*vport
,
10355 struct hclge_vlan_info
*new_info
,
10356 struct hclge_vlan_info
*old_info
)
10358 struct hclge_dev
*hdev
= vport
->back
;
10361 /* add new VLAN tag */
10362 ret
= hclge_set_vlan_filter_hw(hdev
, htons(new_info
->vlan_proto
),
10363 vport
->vport_id
, new_info
->vlan_tag
,
10368 vport
->port_base_vlan_cfg
.tbl_sta
= false;
10369 /* remove old VLAN tag */
10370 if (old_info
->vlan_tag
== 0)
10371 ret
= hclge_set_vf_vlan_common(hdev
, vport
->vport_id
,
10374 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10376 old_info
->vlan_tag
, true);
10378 dev_err(&hdev
->pdev
->dev
,
10379 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10380 vport
->vport_id
, old_info
->vlan_tag
, ret
);
10385 int hclge_update_port_base_vlan_cfg(struct hclge_vport
*vport
, u16 state
,
10386 struct hclge_vlan_info
*vlan_info
)
10388 struct hnae3_handle
*nic
= &vport
->nic
;
10389 struct hclge_vlan_info
*old_vlan_info
;
10392 old_vlan_info
= &vport
->port_base_vlan_cfg
.vlan_info
;
10394 ret
= hclge_vlan_offload_cfg(vport
, state
, vlan_info
->vlan_tag
,
10399 if (!hclge_need_update_vlan_filter(vlan_info
, old_vlan_info
))
10402 if (state
== HNAE3_PORT_BASE_VLAN_MODIFY
)
10403 ret
= hclge_modify_port_base_vlan_tag(vport
, vlan_info
,
10406 ret
= hclge_update_vlan_filter_entries(vport
, state
, vlan_info
,
10412 vport
->port_base_vlan_cfg
.state
= state
;
10413 if (state
== HNAE3_PORT_BASE_VLAN_DISABLE
)
10414 nic
->port_base_vlan_state
= HNAE3_PORT_BASE_VLAN_DISABLE
;
10416 nic
->port_base_vlan_state
= HNAE3_PORT_BASE_VLAN_ENABLE
;
10418 vport
->port_base_vlan_cfg
.old_vlan_info
= *old_vlan_info
;
10419 vport
->port_base_vlan_cfg
.vlan_info
= *vlan_info
;
10420 vport
->port_base_vlan_cfg
.tbl_sta
= true;
10421 hclge_set_vport_vlan_fltr_change(vport
);
10426 static u16
hclge_get_port_base_vlan_state(struct hclge_vport
*vport
,
10427 enum hnae3_port_base_vlan_state state
,
10430 if (state
== HNAE3_PORT_BASE_VLAN_DISABLE
) {
10432 return HNAE3_PORT_BASE_VLAN_NOCHANGE
;
10434 return HNAE3_PORT_BASE_VLAN_ENABLE
;
10438 return HNAE3_PORT_BASE_VLAN_DISABLE
;
10440 if (vport
->port_base_vlan_cfg
.vlan_info
.vlan_tag
== vlan
&&
10441 vport
->port_base_vlan_cfg
.vlan_info
.qos
== qos
)
10442 return HNAE3_PORT_BASE_VLAN_NOCHANGE
;
10444 return HNAE3_PORT_BASE_VLAN_MODIFY
;
10447 static int hclge_set_vf_vlan_filter(struct hnae3_handle
*handle
, int vfid
,
10448 u16 vlan
, u8 qos
, __be16 proto
)
10450 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(handle
->pdev
);
10451 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10452 struct hclge_dev
*hdev
= vport
->back
;
10453 struct hclge_vlan_info vlan_info
;
10457 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
10458 return -EOPNOTSUPP
;
10460 vport
= hclge_get_vf_vport(hdev
, vfid
);
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10465 if (vlan
> VLAN_N_VID
- 1 || qos
> 7)
10467 if (proto
!= htons(ETH_P_8021Q
))
10468 return -EPROTONOSUPPORT
;
10470 state
= hclge_get_port_base_vlan_state(vport
,
10471 vport
->port_base_vlan_cfg
.state
,
10473 if (state
== HNAE3_PORT_BASE_VLAN_NOCHANGE
)
10476 vlan_info
.vlan_tag
= vlan
;
10477 vlan_info
.qos
= qos
;
10478 vlan_info
.vlan_proto
= ntohs(proto
);
10480 ret
= hclge_update_port_base_vlan_cfg(vport
, state
, &vlan_info
);
10482 dev_err(&hdev
->pdev
->dev
,
10483 "failed to update port base vlan for vf %d, ret = %d\n",
	/* There is a time window before the PF knows the VF is unalive; the
	 * mailbox send may fail during it, but that does not matter, the VF
	 * will query the info again when it reinitializes.
	 * For DEVICE_VERSION_V3, the VF does not need to know about the port
	 * based VLAN.
10494 if (ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V3
) {
10495 if (test_bit(HCLGE_VPORT_STATE_ALIVE
, &vport
->state
))
10496 (void)hclge_push_vf_port_base_vlan_info(&hdev
->vport
[0],
10501 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN
,
10502 &vport
->need_notify
);
10507 static void hclge_clear_vf_vlan(struct hclge_dev
*hdev
)
10509 struct hclge_vlan_info
*vlan_info
;
10510 struct hclge_vport
*vport
;
10514 /* clear port base vlan for all vf */
10515 for (vf
= HCLGE_VF_VPORT_START_NUM
; vf
< hdev
->num_alloc_vport
; vf
++) {
10516 vport
= &hdev
->vport
[vf
];
10517 vlan_info
= &vport
->port_base_vlan_cfg
.vlan_info
;
10519 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10521 vlan_info
->vlan_tag
, true);
10523 dev_err(&hdev
->pdev
->dev
,
10524 "failed to clear vf vlan for vf%d, ret = %d\n",
10525 vf
- HCLGE_VF_VPORT_START_NUM
, ret
);
10529 int hclge_set_vlan_filter(struct hnae3_handle
*handle
, __be16 proto
,
10530 u16 vlan_id
, bool is_kill
)
10532 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10533 struct hclge_dev
*hdev
= vport
->back
;
10534 bool writen_to_tbl
= false;
	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle the mailbox. Just record the vlan id, and
	 * remove it after the reset finishes.
10541 mutex_lock(&hdev
->vport_lock
);
10542 if ((test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
) ||
10543 test_bit(HCLGE_STATE_RST_FAIL
, &hdev
->state
)) && is_kill
) {
10544 set_bit(vlan_id
, vport
->vlan_del_fail_bmap
);
10545 mutex_unlock(&hdev
->vport_lock
);
10547 } else if (!is_kill
&& test_bit(vlan_id
, vport
->vlan_del_fail_bmap
)) {
10548 clear_bit(vlan_id
, vport
->vlan_del_fail_bmap
);
10550 mutex_unlock(&hdev
->vport_lock
);
	/* When port base vlan is enabled, we use the port base vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing one,
	 * just update the vport vlan list. The vlan ids in the vlan list are
	 * not written to the vlan filter table until port base vlan is
	 * disabled.
10558 if (handle
->port_base_vlan_state
== HNAE3_PORT_BASE_VLAN_DISABLE
) {
10559 ret
= hclge_set_vlan_filter_hw(hdev
, proto
, vport
->vport_id
,
10561 writen_to_tbl
= true;
10566 hclge_add_vport_vlan_table(vport
, vlan_id
,
10568 } else if (is_kill
&& vlan_id
!= 0) {
10569 mutex_lock(&hdev
->vport_lock
);
10570 hclge_rm_vport_vlan_table(vport
, vlan_id
, false);
10571 mutex_unlock(&hdev
->vport_lock
);
10573 } else if (is_kill
) {
		/* when removing the hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to stay consistent
10578 mutex_lock(&hdev
->vport_lock
);
10579 set_bit(vlan_id
, vport
->vlan_del_fail_bmap
);
10580 mutex_unlock(&hdev
->vport_lock
);
10583 hclge_set_vport_vlan_fltr_change(vport
);
10588 static void hclge_sync_vlan_fltr_state(struct hclge_dev
*hdev
)
10590 struct hclge_vport
*vport
;
10594 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
10595 vport
= &hdev
->vport
[i
];
10596 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE
,
10600 ret
= hclge_enable_vport_vlan_filter(vport
,
10601 vport
->req_vlan_fltr_en
);
10603 dev_err(&hdev
->pdev
->dev
,
10604 "failed to sync vlan filter state for vport%u, ret = %d\n",
10605 vport
->vport_id
, ret
);
10606 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE
,
10613 static void hclge_sync_vlan_filter(struct hclge_dev
*hdev
)
10615 #define HCLGE_MAX_SYNC_COUNT 60
10617 int i
, ret
, sync_cnt
= 0;
10620 mutex_lock(&hdev
->vport_lock
);
10621 /* start from vport 1 for PF is always alive */
10622 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
10623 struct hclge_vport
*vport
= &hdev
->vport
[i
];
10625 vlan_id
= find_first_bit(vport
->vlan_del_fail_bmap
,
10627 while (vlan_id
!= VLAN_N_VID
) {
10628 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10629 vport
->vport_id
, vlan_id
,
10631 if (ret
&& ret
!= -EINVAL
) {
10632 mutex_unlock(&hdev
->vport_lock
);
10636 clear_bit(vlan_id
, vport
->vlan_del_fail_bmap
);
10637 hclge_rm_vport_vlan_table(vport
, vlan_id
, false);
10638 hclge_set_vport_vlan_fltr_change(vport
);
10641 if (sync_cnt
>= HCLGE_MAX_SYNC_COUNT
) {
10642 mutex_unlock(&hdev
->vport_lock
);
10646 vlan_id
= find_first_bit(vport
->vlan_del_fail_bmap
,
10650 mutex_unlock(&hdev
->vport_lock
);
10652 hclge_sync_vlan_fltr_state(hdev
);
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
10676 int hclge_set_vport_mtu(struct hclge_vport
*vport
, int new_mtu
)
10678 struct hclge_dev
*hdev
= vport
->back
;
10679 int i
, max_frm_size
, ret
;
	/* HW supports 2 layers of VLAN tags */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
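	/* e.g. with the default 1500 byte MTU this gives
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526
	 * bytes of maximum frame size.
	 */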
10683 if (max_frm_size
< HCLGE_MAC_MIN_FRAME
||
10684 max_frm_size
> hdev
->ae_dev
->dev_specs
.max_frm_size
)
10687 max_frm_size
= max(max_frm_size
, HCLGE_MAC_DEFAULT_FRAME
);
10688 mutex_lock(&hdev
->vport_lock
);
10689 /* VF's mps must fit within hdev->mps */
10690 if (vport
->vport_id
&& max_frm_size
> hdev
->mps
) {
10691 mutex_unlock(&hdev
->vport_lock
);
10693 } else if (vport
->vport_id
) {
10694 vport
->mps
= max_frm_size
;
10695 mutex_unlock(&hdev
->vport_lock
);
	/* PF's mps must be greater than VF's mps */
10700 for (i
= 1; i
< hdev
->num_alloc_vport
; i
++)
10701 if (max_frm_size
< hdev
->vport
[i
].mps
) {
10702 dev_err(&hdev
->pdev
->dev
,
10703 "failed to set pf mtu for less than vport %d, mps = %u.\n",
10704 i
, hdev
->vport
[i
].mps
);
10705 mutex_unlock(&hdev
->vport_lock
);
10709 hclge_notify_client(hdev
, HNAE3_DOWN_CLIENT
);
10711 ret
= hclge_set_mac_mtu(hdev
, max_frm_size
);
10713 dev_err(&hdev
->pdev
->dev
,
10714 "Change mtu fail, ret =%d\n", ret
);
10718 hdev
->mps
= max_frm_size
;
10719 vport
->mps
= max_frm_size
;
10721 ret
= hclge_buffer_alloc(hdev
);
10723 dev_err(&hdev
->pdev
->dev
,
10724 "Allocate buffer fail, ret =%d\n", ret
);
10727 hclge_notify_client(hdev
, HNAE3_UP_CLIENT
);
10728 mutex_unlock(&hdev
->vport_lock
);
10732 static int hclge_reset_tqp_cmd_send(struct hclge_dev
*hdev
, u16 queue_id
,
10735 struct hclge_reset_tqp_queue_cmd
*req
;
10736 struct hclge_desc desc
;
10739 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RESET_TQP_QUEUE
, false);
10741 req
= (struct hclge_reset_tqp_queue_cmd
*)desc
.data
;
10742 req
->tqp_id
= cpu_to_le16(queue_id
);
10744 hnae3_set_bit(req
->reset_req
, HCLGE_TQP_RESET_B
, 1U);
10746 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
10748 dev_err(&hdev
->pdev
->dev
,
10749 "Send tqp reset cmd error, status =%d\n", ret
);
10756 static int hclge_get_reset_status(struct hclge_dev
*hdev
, u16 queue_id
,
10759 struct hclge_reset_tqp_queue_cmd
*req
;
10760 struct hclge_desc desc
;
10763 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RESET_TQP_QUEUE
, true);
10765 req
= (struct hclge_reset_tqp_queue_cmd
*)desc
.data
;
10766 req
->tqp_id
= cpu_to_le16(queue_id
);
10768 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
10770 dev_err(&hdev
->pdev
->dev
,
10771 "Get reset status error, status =%d\n", ret
);
10775 *reset_status
= hnae3_get_bit(req
->ready_to_reset
, HCLGE_TQP_RESET_B
);
10780 u16
hclge_covert_handle_qid_global(struct hnae3_handle
*handle
, u16 queue_id
)
10782 struct hclge_comm_tqp
*tqp
;
10783 struct hnae3_queue
*queue
;
10785 queue
= handle
->kinfo
.tqp
[queue_id
];
10786 tqp
= container_of(queue
, struct hclge_comm_tqp
, q
);
10791 static int hclge_reset_tqp_cmd(struct hnae3_handle
*handle
)
10793 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10794 struct hclge_dev
*hdev
= vport
->back
;
10795 u16 reset_try_times
= 0;
10801 for (i
= 0; i
< handle
->kinfo
.num_tqps
; i
++) {
10802 queue_gid
= hclge_covert_handle_qid_global(handle
, i
);
10803 ret
= hclge_reset_tqp_cmd_send(hdev
, queue_gid
, true);
10805 dev_err(&hdev
->pdev
->dev
,
10806 "failed to send reset tqp cmd, ret = %d\n",
10811 while (reset_try_times
++ < HCLGE_TQP_RESET_TRY_TIMES
) {
10812 ret
= hclge_get_reset_status(hdev
, queue_gid
,
10820 /* Wait for tqp hw reset */
10821 usleep_range(1000, 1200);
10824 if (reset_try_times
>= HCLGE_TQP_RESET_TRY_TIMES
) {
10825 dev_err(&hdev
->pdev
->dev
,
10826 "wait for tqp hw reset timeout\n");
10830 ret
= hclge_reset_tqp_cmd_send(hdev
, queue_gid
, false);
10832 dev_err(&hdev
->pdev
->dev
,
10833 "failed to deassert soft reset, ret = %d\n",
10837 reset_try_times
= 0;
10842 static int hclge_reset_rcb(struct hnae3_handle
*handle
)
10844 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10845 #define HCLGE_RESET_RCB_SUCCESS 1U
10847 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10848 struct hclge_dev
*hdev
= vport
->back
;
10849 struct hclge_reset_cmd
*req
;
10850 struct hclge_desc desc
;
10855 queue_gid
= hclge_covert_handle_qid_global(handle
, 0);
10857 req
= (struct hclge_reset_cmd
*)desc
.data
;
10858 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CFG_RST_TRIGGER
, false);
10859 hnae3_set_bit(req
->fun_reset_rcb
, HCLGE_CFG_RESET_RCB_B
, 1);
10860 req
->fun_reset_rcb_vqid_start
= cpu_to_le16(queue_gid
);
10861 req
->fun_reset_rcb_vqid_num
= cpu_to_le16(handle
->kinfo
.num_tqps
);
10863 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
10865 dev_err(&hdev
->pdev
->dev
,
10866 "failed to send rcb reset cmd, ret = %d\n", ret
);
10870 return_status
= req
->fun_reset_rcb_return_status
;
10871 if (return_status
== HCLGE_RESET_RCB_SUCCESS
)
10874 if (return_status
!= HCLGE_RESET_RCB_NOT_SUPPORT
) {
10875 dev_err(&hdev
->pdev
->dev
, "failed to reset rcb, ret = %u\n",
10880 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10881 * again to reset all tqps
10883 return hclge_reset_tqp_cmd(handle
);
10886 int hclge_reset_tqp(struct hnae3_handle
*handle
)
10888 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10889 struct hclge_dev
*hdev
= vport
->back
;
10892 /* only need to disable PF's tqp */
10893 if (!vport
->vport_id
) {
10894 ret
= hclge_tqp_enable(handle
, false);
10896 dev_err(&hdev
->pdev
->dev
,
10897 "failed to disable tqp, ret = %d\n", ret
);
10902 return hclge_reset_rcb(handle
);
10905 static u32
hclge_get_fw_version(struct hnae3_handle
*handle
)
10907 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10908 struct hclge_dev
*hdev
= vport
->back
;
10910 return hdev
->fw_version
;
10913 int hclge_query_scc_version(struct hclge_dev
*hdev
, u32
*scc_version
)
10915 struct hclge_comm_query_scc_cmd
*resp
;
10916 struct hclge_desc desc
;
10919 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_SCC_VER
, 1);
10920 resp
= (struct hclge_comm_query_scc_cmd
*)desc
.data
;
10922 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
10926 *scc_version
= le32_to_cpu(resp
->scc_version
);
10931 static void hclge_set_flowctrl_adv(struct hclge_dev
*hdev
, u32 rx_en
, u32 tx_en
)
10933 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
10938 phy_set_asym_pause(phydev
, rx_en
, tx_en
);
10941 static int hclge_cfg_pauseparam(struct hclge_dev
*hdev
, u32 rx_en
, u32 tx_en
)
10945 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_PFC
)
10948 ret
= hclge_mac_pause_en_cfg(hdev
, tx_en
, rx_en
);
10950 dev_err(&hdev
->pdev
->dev
,
10951 "configure pauseparam error, ret = %d.\n", ret
);
10956 int hclge_cfg_flowctrl(struct hclge_dev
*hdev
)
10958 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
10959 u16 remote_advertising
= 0;
10960 u16 local_advertising
;
10961 u32 rx_pause
, tx_pause
;
10967 if (!phydev
->autoneg
)
10968 return hclge_mac_pause_setup_hw(hdev
);
10970 local_advertising
= linkmode_adv_to_lcl_adv_t(phydev
->advertising
);
10973 remote_advertising
= LPA_PAUSE_CAP
;
10975 if (phydev
->asym_pause
)
10976 remote_advertising
|= LPA_PAUSE_ASYM
;
10978 flowctl
= mii_resolve_flowctrl_fdx(local_advertising
,
10979 remote_advertising
);
10980 tx_pause
= flowctl
& FLOW_CTRL_TX
;
10981 rx_pause
= flowctl
& FLOW_CTRL_RX
;
10983 if (phydev
->duplex
== HCLGE_MAC_HALF
) {
10988 return hclge_cfg_pauseparam(hdev
, rx_pause
, tx_pause
);
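/* Example of the resolution in hclge_cfg_flowctrl() above: when both link
 * partners advertise symmetric pause, mii_resolve_flowctrl_fdx() returns
 * FLOW_CTRL_TX | FLOW_CTRL_RX and pause is enabled in both directions.
 */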
10991 static void hclge_get_pauseparam(struct hnae3_handle
*handle
, u32
*auto_neg
,
10992 u32
*rx_en
, u32
*tx_en
)
10994 struct hclge_vport
*vport
= hclge_get_vport(handle
);
10995 struct hclge_dev
*hdev
= vport
->back
;
10996 u8 media_type
= hdev
->hw
.mac
.media_type
;
10998 *auto_neg
= (media_type
== HNAE3_MEDIA_TYPE_COPPER
) ?
10999 hclge_get_autoneg(handle
) : 0;
11001 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_PFC
) {
11007 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_RX_PAUSE
) {
11010 } else if (hdev
->tm_info
.fc_mode
== HCLGE_FC_TX_PAUSE
) {
11013 } else if (hdev
->tm_info
.fc_mode
== HCLGE_FC_FULL
) {
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}
11037 static int hclge_set_pauseparam(struct hnae3_handle
*handle
, u32 auto_neg
,
11038 u32 rx_en
, u32 tx_en
)
11040 struct hclge_vport
*vport
= hclge_get_vport(handle
);
11041 struct hclge_dev
*hdev
= vport
->back
;
11042 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
11045 if (phydev
|| hnae3_dev_phy_imp_supported(hdev
)) {
11046 fc_autoneg
= hclge_get_autoneg(handle
);
11047 if (auto_neg
!= fc_autoneg
) {
11048 dev_info(&hdev
->pdev
->dev
,
11049 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11050 return -EOPNOTSUPP
;
11054 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_PFC
) {
11055 dev_info(&hdev
->pdev
->dev
,
11056 "Priority flow control enabled. Cannot set link flow control.\n");
11057 return -EOPNOTSUPP
;
11060 hclge_set_flowctrl_adv(hdev
, rx_en
, tx_en
);
11062 hclge_record_user_pauseparam(hdev
, rx_en
, tx_en
);
11064 if (!auto_neg
|| hnae3_dev_phy_imp_supported(hdev
))
11065 return hclge_cfg_pauseparam(hdev
, rx_en
, tx_en
);
11068 return phy_start_aneg(phydev
);
11070 return -EOPNOTSUPP
;
11073 static void hclge_get_ksettings_an_result(struct hnae3_handle
*handle
,
11074 u8
*auto_neg
, u32
*speed
, u8
*duplex
, u32
*lane_num
)
11076 struct hclge_vport
*vport
= hclge_get_vport(handle
);
11077 struct hclge_dev
*hdev
= vport
->back
;
11080 *speed
= hdev
->hw
.mac
.speed
;
11082 *duplex
= hdev
->hw
.mac
.duplex
;
11084 *auto_neg
= hdev
->hw
.mac
.autoneg
;
11086 *lane_num
= hdev
->hw
.mac
.lane_num
;
11089 static void hclge_get_media_type(struct hnae3_handle
*handle
, u8
*media_type
,
11092 struct hclge_vport
*vport
= hclge_get_vport(handle
);
11093 struct hclge_dev
*hdev
= vport
->back
;
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type to make sure the
	 * reported media information is correct.
11099 hclge_update_port_info(hdev
);
11102 *media_type
= hdev
->hw
.mac
.media_type
;
11105 *module_type
= hdev
->hw
.mac
.module_type
;
11108 static void hclge_get_mdix_mode(struct hnae3_handle
*handle
,
11109 u8
*tp_mdix_ctrl
, u8
*tp_mdix
)
11111 struct hclge_vport
*vport
= hclge_get_vport(handle
);
11112 struct hclge_dev
*hdev
= vport
->back
;
11113 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
11114 int mdix_ctrl
, mdix
, is_resolved
;
11115 unsigned int retval
;
11118 *tp_mdix_ctrl
= ETH_TP_MDI_INVALID
;
11119 *tp_mdix
= ETH_TP_MDI_INVALID
;
11123 phy_write(phydev
, HCLGE_PHY_PAGE_REG
, HCLGE_PHY_PAGE_MDIX
);
11125 retval
= phy_read(phydev
, HCLGE_PHY_CSC_REG
);
11126 mdix_ctrl
= hnae3_get_field(retval
, HCLGE_PHY_MDIX_CTRL_M
,
11127 HCLGE_PHY_MDIX_CTRL_S
);
11129 retval
= phy_read(phydev
, HCLGE_PHY_CSS_REG
);
11130 mdix
= hnae3_get_bit(retval
, HCLGE_PHY_MDIX_STATUS_B
);
11131 is_resolved
= hnae3_get_bit(retval
, HCLGE_PHY_SPEED_DUP_RESOLVE_B
);
11133 phy_write(phydev
, HCLGE_PHY_PAGE_REG
, HCLGE_PHY_PAGE_COPPER
);
11135 switch (mdix_ctrl
) {
11137 *tp_mdix_ctrl
= ETH_TP_MDI
;
11140 *tp_mdix_ctrl
= ETH_TP_MDI_X
;
11143 *tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
11146 *tp_mdix_ctrl
= ETH_TP_MDI_INVALID
;
11151 *tp_mdix
= ETH_TP_MDI_INVALID
;
11153 *tp_mdix
= ETH_TP_MDI_X
;
11155 *tp_mdix
= ETH_TP_MDI
;
11158 static void hclge_info_show(struct hclge_dev
*hdev
)
11160 struct hnae3_handle
*handle
= &hdev
->vport
->nic
;
11161 struct device
*dev
= &hdev
->pdev
->dev
;
11163 dev_info(dev
, "PF info begin:\n");
11165 dev_info(dev
, "Task queue pairs numbers: %u\n", hdev
->num_tqps
);
11166 dev_info(dev
, "Desc num per TX queue: %u\n", hdev
->num_tx_desc
);
11167 dev_info(dev
, "Desc num per RX queue: %u\n", hdev
->num_rx_desc
);
11168 dev_info(dev
, "Numbers of vports: %u\n", hdev
->num_alloc_vport
);
11169 dev_info(dev
, "Numbers of VF for this PF: %u\n", hdev
->num_req_vfs
);
11170 dev_info(dev
, "HW tc map: 0x%x\n", hdev
->hw_tc_map
);
11171 dev_info(dev
, "Total buffer size for TX/RX: %u\n", hdev
->pkt_buf_size
);
11172 dev_info(dev
, "TX buffer size for each TC: %u\n", hdev
->tx_buf_size
);
11173 dev_info(dev
, "DV buffer size for each TC: %u\n", hdev
->dv_buf_size
);
11174 dev_info(dev
, "This is %s PF\n",
11175 hdev
->flag
& HCLGE_FLAG_MAIN
? "main" : "not main");
11176 dev_info(dev
, "DCB %s\n",
11177 handle
->kinfo
.tc_info
.dcb_ets_active
? "enable" : "disable");
11178 dev_info(dev
, "MQPRIO %s\n",
11179 handle
->kinfo
.tc_info
.mqprio_active
? "enable" : "disable");
11180 dev_info(dev
, "Default tx spare buffer size: %u\n",
11181 hdev
->tx_spare_buf_size
);
11183 dev_info(dev
, "PF info end.\n");
11186 static int hclge_init_nic_client_instance(struct hnae3_ae_dev
*ae_dev
,
11187 struct hclge_vport
*vport
)
11189 struct hnae3_client
*client
= vport
->nic
.client
;
11190 struct hclge_dev
*hdev
= ae_dev
->priv
;
11191 int rst_cnt
= hdev
->rst_stats
.reset_cnt
;
11194 ret
= client
->ops
->init_instance(&vport
->nic
);
11198 set_bit(HCLGE_STATE_NIC_REGISTERED
, &hdev
->state
);
11199 if (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
) ||
11200 rst_cnt
!= hdev
->rst_stats
.reset_cnt
) {
11205 /* Enable nic hw error interrupts */
11206 ret
= hclge_config_nic_hw_error(hdev
, true);
11208 dev_err(&ae_dev
->pdev
->dev
,
11209 "fail(%d) to enable hw error interrupts\n", ret
);
11213 hnae3_set_client_init_flag(client
, ae_dev
, 1);
11215 if (netif_msg_drv(&hdev
->vport
->nic
))
11216 hclge_info_show(hdev
);
11221 clear_bit(HCLGE_STATE_NIC_REGISTERED
, &hdev
->state
);
11222 while (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
))
11223 msleep(HCLGE_WAIT_RESET_DONE
);
11225 client
->ops
->uninit_instance(&vport
->nic
, 0);
11230 static int hclge_init_roce_client_instance(struct hnae3_ae_dev
*ae_dev
,
11231 struct hclge_vport
*vport
)
11233 struct hclge_dev
*hdev
= ae_dev
->priv
;
11234 struct hnae3_client
*client
;
11238 if (!hnae3_dev_roce_supported(hdev
) || !hdev
->roce_client
||
11242 client
= hdev
->roce_client
;
11243 ret
= hclge_init_roce_base_info(vport
);
11247 rst_cnt
= hdev
->rst_stats
.reset_cnt
;
11248 ret
= client
->ops
->init_instance(&vport
->roce
);
11252 set_bit(HCLGE_STATE_ROCE_REGISTERED
, &hdev
->state
);
11253 if (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
) ||
11254 rst_cnt
!= hdev
->rst_stats
.reset_cnt
) {
11256 goto init_roce_err
;
11259 /* Enable roce ras interrupts */
11260 ret
= hclge_config_rocee_ras_interrupt(hdev
, true);
11262 dev_err(&ae_dev
->pdev
->dev
,
11263 "fail(%d) to enable roce ras interrupts\n", ret
);
11264 goto init_roce_err
;
11267 hnae3_set_client_init_flag(client
, ae_dev
, 1);
11272 clear_bit(HCLGE_STATE_ROCE_REGISTERED
, &hdev
->state
);
11273 while (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
))
11274 msleep(HCLGE_WAIT_RESET_DONE
);
11276 hdev
->roce_client
->ops
->uninit_instance(&vport
->roce
, 0);
11281 static int hclge_init_client_instance(struct hnae3_client
*client
,
11282 struct hnae3_ae_dev
*ae_dev
)
11284 struct hclge_dev
*hdev
= ae_dev
->priv
;
11285 struct hclge_vport
*vport
= &hdev
->vport
[0];
11288 switch (client
->type
) {
11289 case HNAE3_CLIENT_KNIC
:
11290 hdev
->nic_client
= client
;
11291 vport
->nic
.client
= client
;
11292 ret
= hclge_init_nic_client_instance(ae_dev
, vport
);
11296 ret
= hclge_init_roce_client_instance(ae_dev
, vport
);
11301 case HNAE3_CLIENT_ROCE
:
11302 if (hnae3_dev_roce_supported(hdev
)) {
11303 hdev
->roce_client
= client
;
11304 vport
->roce
.client
= client
;
11307 ret
= hclge_init_roce_client_instance(ae_dev
, vport
);
11319 hdev
->nic_client
= NULL
;
11320 vport
->nic
.client
= NULL
;
11323 hdev
->roce_client
= NULL
;
11324 vport
->roce
.client
= NULL
;
11328 static bool hclge_uninit_need_wait(struct hclge_dev
*hdev
)
11330 return test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
) ||
11331 test_bit(HCLGE_STATE_LINK_UPDATING
, &hdev
->state
);
11334 static void hclge_uninit_client_instance(struct hnae3_client
*client
,
11335 struct hnae3_ae_dev
*ae_dev
)
11337 struct hclge_dev
*hdev
= ae_dev
->priv
;
11338 struct hclge_vport
*vport
= &hdev
->vport
[0];
11340 if (hdev
->roce_client
) {
11341 clear_bit(HCLGE_STATE_ROCE_REGISTERED
, &hdev
->state
);
11342 while (hclge_uninit_need_wait(hdev
))
11343 msleep(HCLGE_WAIT_RESET_DONE
);
11345 hdev
->roce_client
->ops
->uninit_instance(&vport
->roce
, 0);
11346 hdev
->roce_client
= NULL
;
11347 vport
->roce
.client
= NULL
;
11349 if (client
->type
== HNAE3_CLIENT_ROCE
)
11351 if (hdev
->nic_client
&& client
->ops
->uninit_instance
) {
11352 clear_bit(HCLGE_STATE_NIC_REGISTERED
, &hdev
->state
);
11353 while (test_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
))
11354 msleep(HCLGE_WAIT_RESET_DONE
);
11356 client
->ops
->uninit_instance(&vport
->nic
, 0);
11357 hdev
->nic_client
= NULL
;
11358 vport
->nic
.client
= NULL
;
11362 static int hclge_dev_mem_map(struct hclge_dev
*hdev
)
11364 struct pci_dev
*pdev
= hdev
->pdev
;
11365 struct hclge_hw
*hw
= &hdev
->hw
;
	/* for a device that does not have device memory, return directly */
11368 if (!(pci_select_bars(pdev
, IORESOURCE_MEM
) & BIT(HCLGE_MEM_BAR
)))
11372 devm_ioremap_wc(&pdev
->dev
,
11373 pci_resource_start(pdev
, HCLGE_MEM_BAR
),
11374 pci_resource_len(pdev
, HCLGE_MEM_BAR
));
11375 if (!hw
->hw
.mem_base
) {
11376 dev_err(&pdev
->dev
, "failed to map device memory\n");
11383 static int hclge_pci_init(struct hclge_dev
*hdev
)
11385 struct pci_dev
*pdev
= hdev
->pdev
;
11386 struct hclge_hw
*hw
;
11389 ret
= pci_enable_device(pdev
);
11391 dev_err(&pdev
->dev
, "failed to enable PCI device\n");
11395 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
11397 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
11399 dev_err(&pdev
->dev
,
11400 "can't set consistent PCI DMA");
11401 goto err_disable_device
;
11403 dev_warn(&pdev
->dev
, "set DMA mask to 32 bits\n");
11406 ret
= pci_request_regions(pdev
, HCLGE_DRIVER_NAME
);
11408 dev_err(&pdev
->dev
, "PCI request regions failed %d\n", ret
);
11409 goto err_disable_device
;
11412 pci_set_master(pdev
);
11414 hw
->hw
.io_base
= pcim_iomap(pdev
, 2, 0);
11415 if (!hw
->hw
.io_base
) {
11416 dev_err(&pdev
->dev
, "Can't map configuration register space\n");
11418 goto err_release_regions
;
11421 ret
= hclge_dev_mem_map(hdev
);
11423 goto err_unmap_io_base
;
11425 hdev
->num_req_vfs
= pci_sriov_get_totalvfs(pdev
);
11430 pcim_iounmap(pdev
, hdev
->hw
.hw
.io_base
);
11431 err_release_regions
:
11432 pci_release_regions(pdev
);
11433 err_disable_device
:
11434 pci_disable_device(pdev
);
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}
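/* Prepare for a reset requested from outside the reset service task: take
 * the reset semaphore, run the reset preparation (retrying a few times if
 * it fails or another reset is still pending), then mask the misc vector
 * and disable the command queue until the reset completes.
 */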
11474 static void hclge_reset_prepare_general(struct hnae3_ae_dev
*ae_dev
,
11475 enum hnae3_reset_type rst_type
)
11477 #define HCLGE_RESET_RETRY_WAIT_MS 500
11478 #define HCLGE_RESET_RETRY_CNT 5
11480 struct hclge_dev
*hdev
= ae_dev
->priv
;
11484 while (retry_cnt
++ < HCLGE_RESET_RETRY_CNT
) {
11485 down(&hdev
->reset_sem
);
11486 set_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
);
11487 hdev
->reset_type
= rst_type
;
11488 ret
= hclge_reset_prepare(hdev
);
11489 if (!ret
&& !hdev
->reset_pending
)
11492 dev_err(&hdev
->pdev
->dev
,
11493 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
11494 ret
, hdev
->reset_pending
, retry_cnt
);
11495 clear_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
);
11496 up(&hdev
->reset_sem
);
11497 msleep(HCLGE_RESET_RETRY_WAIT_MS
);
11500 /* disable misc vector before reset done */
11501 hclge_enable_vector(&hdev
->misc_vector
, false);
11502 set_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
);
11504 if (hdev
->reset_type
== HNAE3_FLR_RESET
)
11505 hdev
->rst_stats
.flr_rst_cnt
++;
11508 static void hclge_reset_done(struct hnae3_ae_dev
*ae_dev
)
11510 struct hclge_dev
*hdev
= ae_dev
->priv
;
11513 hclge_enable_vector(&hdev
->misc_vector
, true);
11515 ret
= hclge_reset_rebuild(hdev
);
11517 dev_err(&hdev
->pdev
->dev
, "fail to rebuild, ret=%d\n", ret
);
11519 hdev
->reset_type
= HNAE3_NONE_RESET
;
11520 if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING
, &hdev
->state
))
11521 up(&hdev
->reset_sem
);
11524 static void hclge_clear_resetting_state(struct hclge_dev
*hdev
)
11528 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
11529 struct hclge_vport
*vport
= &hdev
->vport
[i
];
11532 /* Send cmd to clear vport's FUNC_RST_ING */
11533 ret
= hclge_set_vf_rst(hdev
, vport
->vport_id
, false);
11535 dev_warn(&hdev
->pdev
->dev
,
11536 "clear vport(%u) rst failed %d!\n",
11537 vport
->vport_id
, ret
);
11541 static int hclge_clear_hw_resource(struct hclge_dev
*hdev
)
11543 struct hclge_desc desc
;
11546 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CLEAR_HW_RESOURCE
, false);
11548 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
	/* This new command is only supported by new firmware; it will fail
	 * with older firmware. The error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, so to keep the
	 * code backward compatible we override this value and return
	 * success.
11555 if (ret
&& ret
!= -EOPNOTSUPP
) {
11556 dev_err(&hdev
->pdev
->dev
,
11557 "failed to clear hw resource, ret = %d\n", ret
);
static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}
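/* Query the firmware for the Wake-on-LAN modes supported by this port. */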
11582 static int hclge_get_wol_supported_mode(struct hclge_dev
*hdev
,
11583 u32
*wol_supported
)
11585 struct hclge_query_wol_supported_cmd
*wol_supported_cmd
;
11586 struct hclge_desc desc
;
11589 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_WOL_GET_SUPPORTED_MODE
,
11591 wol_supported_cmd
= (struct hclge_query_wol_supported_cmd
*)desc
.data
;
11593 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
11595 dev_err(&hdev
->pdev
->dev
,
11596 "failed to query wol supported, ret = %d\n", ret
);
11600 *wol_supported
= le32_to_cpu(wol_supported_cmd
->supported_wake_mode
);
11605 static int hclge_set_wol_cfg(struct hclge_dev
*hdev
,
11606 struct hclge_wol_info
*wol_info
)
11608 struct hclge_wol_cfg_cmd
*wol_cfg_cmd
;
11609 struct hclge_desc desc
;
11612 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_WOL_CFG
, false);
11613 wol_cfg_cmd
= (struct hclge_wol_cfg_cmd
*)desc
.data
;
11614 wol_cfg_cmd
->wake_on_lan_mode
= cpu_to_le32(wol_info
->wol_current_mode
);
11615 wol_cfg_cmd
->sopass_size
= wol_info
->wol_sopass_size
;
11616 memcpy(wol_cfg_cmd
->sopass
, wol_info
->wol_sopass
, SOPASS_MAX
);
11618 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
11620 dev_err(&hdev
->pdev
->dev
,
11621 "failed to set wol config, ret = %d\n", ret
);
11626 static int hclge_update_wol(struct hclge_dev
*hdev
)
11628 struct hclge_wol_info
*wol_info
= &hdev
->hw
.mac
.wol
;
11630 if (!hnae3_ae_dev_wol_supported(hdev
->ae_dev
))
11633 return hclge_set_wol_cfg(hdev
, wol_info
);
11636 static int hclge_init_wol(struct hclge_dev
*hdev
)
11638 struct hclge_wol_info
*wol_info
= &hdev
->hw
.mac
.wol
;
11641 if (!hnae3_ae_dev_wol_supported(hdev
->ae_dev
))
11644 memset(wol_info
, 0, sizeof(struct hclge_wol_info
));
11645 ret
= hclge_get_wol_supported_mode(hdev
,
11646 &wol_info
->wol_support_mode
);
11648 wol_info
->wol_support_mode
= 0;
11652 return hclge_update_wol(hdev
);
11655 static void hclge_get_wol(struct hnae3_handle
*handle
,
11656 struct ethtool_wolinfo
*wol
)
11658 struct hclge_wol_info
*wol_info
= hclge_get_wol_info(handle
);
11660 wol
->supported
= wol_info
->wol_support_mode
;
11661 wol
->wolopts
= wol_info
->wol_current_mode
;
11662 if (wol_info
->wol_current_mode
& WAKE_MAGICSECURE
)
11663 memcpy(wol
->sopass
, wol_info
->wol_sopass
, SOPASS_MAX
);
11666 static int hclge_set_wol(struct hnae3_handle
*handle
,
11667 struct ethtool_wolinfo
*wol
)
11669 struct hclge_wol_info
*wol_info
= hclge_get_wol_info(handle
);
11670 struct hclge_vport
*vport
= hclge_get_vport(handle
);
11674 wol_mode
= wol
->wolopts
;
11675 if (wol_mode
& ~wol_info
->wol_support_mode
)
11678 wol_info
->wol_current_mode
= wol_mode
;
11679 if (wol_mode
& WAKE_MAGICSECURE
) {
11680 memcpy(wol_info
->wol_sopass
, wol
->sopass
, SOPASS_MAX
);
11681 wol_info
->wol_sopass_size
= SOPASS_MAX
;
11683 wol_info
->wol_sopass_size
= 0;
11686 ret
= hclge_set_wol_cfg(vport
->back
, wol_info
);
11688 wol_info
->wol_current_mode
= 0;
11693 static int hclge_init_ae_dev(struct hnae3_ae_dev
*ae_dev
)
11695 struct pci_dev
*pdev
= ae_dev
->pdev
;
11696 struct hclge_dev
*hdev
;
11699 hdev
= devm_kzalloc(&pdev
->dev
, sizeof(*hdev
), GFP_KERNEL
);
11704 hdev
->ae_dev
= ae_dev
;
11705 hdev
->reset_type
= HNAE3_NONE_RESET
;
11706 hdev
->reset_level
= HNAE3_FUNC_RESET
;
11707 ae_dev
->priv
= hdev
;
	/* HW supports 2 layers of VLAN tags */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11712 mutex_init(&hdev
->vport_lock
);
11713 spin_lock_init(&hdev
->fd_rule_lock
);
11714 sema_init(&hdev
->reset_sem
, 1);
11716 ret
= hclge_pci_init(hdev
);
11720 /* Firmware command queue initialize */
11721 ret
= hclge_comm_cmd_queue_init(hdev
->pdev
, &hdev
->hw
.hw
);
11723 goto err_pci_uninit
;
11725 /* Firmware command initialize */
11726 hclge_comm_cmd_init_ops(&hdev
->hw
.hw
, &hclge_cmq_ops
);
11727 ret
= hclge_comm_cmd_init(hdev
->ae_dev
, &hdev
->hw
.hw
, &hdev
->fw_version
,
11728 true, hdev
->reset_pending
);
11730 goto err_cmd_uninit
;
11732 ret
= hclge_clear_hw_resource(hdev
);
11734 goto err_cmd_uninit
;
11736 ret
= hclge_get_cap(hdev
);
11738 goto err_cmd_uninit
;
11740 ret
= hclge_query_dev_specs(hdev
);
11742 dev_err(&pdev
->dev
, "failed to query dev specifications, ret = %d.\n",
11744 goto err_cmd_uninit
;
11747 ret
= hclge_configure(hdev
);
11749 dev_err(&pdev
->dev
, "Configure dev error, ret = %d.\n", ret
);
11750 goto err_cmd_uninit
;
11753 ret
= hclge_init_msi(hdev
);
11755 dev_err(&pdev
->dev
, "Init MSI/MSI-X error, ret = %d.\n", ret
);
11756 goto err_cmd_uninit
;
11759 ret
= hclge_misc_irq_init(hdev
);
11761 goto err_msi_uninit
;
11763 ret
= hclge_alloc_tqps(hdev
);
11765 dev_err(&pdev
->dev
, "Allocate TQPs error, ret = %d.\n", ret
);
11766 goto err_msi_irq_uninit
;
11769 ret
= hclge_alloc_vport(hdev
);
11771 goto err_msi_irq_uninit
;
11773 ret
= hclge_map_tqp(hdev
);
11775 goto err_msi_irq_uninit
;
11777 if (hdev
->hw
.mac
.media_type
== HNAE3_MEDIA_TYPE_COPPER
) {
11778 clear_bit(HNAE3_DEV_SUPPORT_FEC_B
, ae_dev
->caps
);
11779 if (hnae3_dev_phy_imp_supported(hdev
))
11780 ret
= hclge_update_tp_port_info(hdev
);
11782 ret
= hclge_mac_mdio_config(hdev
);
11785 goto err_msi_irq_uninit
;
11788 ret
= hclge_init_umv_space(hdev
);
11790 goto err_mdiobus_unreg
;
11792 ret
= hclge_mac_init(hdev
);
11794 dev_err(&pdev
->dev
, "Mac init error, ret = %d\n", ret
);
11795 goto err_mdiobus_unreg
;
11798 ret
= hclge_config_tso(hdev
, HCLGE_TSO_MSS_MIN
, HCLGE_TSO_MSS_MAX
);
11800 dev_err(&pdev
->dev
, "Enable tso fail, ret =%d\n", ret
);
11801 goto err_mdiobus_unreg
;
11804 ret
= hclge_config_gro(hdev
);
11806 goto err_mdiobus_unreg
;
11808 ret
= hclge_init_vlan_config(hdev
);
11810 dev_err(&pdev
->dev
, "VLAN init fail, ret =%d\n", ret
);
11811 goto err_mdiobus_unreg
;
11814 ret
= hclge_tm_schd_init(hdev
);
11816 dev_err(&pdev
->dev
, "tm schd init fail, ret =%d\n", ret
);
11817 goto err_mdiobus_unreg
;
11820 ret
= hclge_comm_rss_init_cfg(&hdev
->vport
->nic
, hdev
->ae_dev
,
11823 dev_err(&pdev
->dev
, "failed to init rss cfg, ret = %d\n", ret
);
11824 goto err_mdiobus_unreg
;
11827 ret
= hclge_rss_init_hw(hdev
);
11829 dev_err(&pdev
->dev
, "Rss init fail, ret =%d\n", ret
);
11830 goto err_mdiobus_unreg
;
11833 ret
= init_mgr_tbl(hdev
);
11835 dev_err(&pdev
->dev
, "manager table init fail, ret =%d\n", ret
);
11836 goto err_mdiobus_unreg
;
11839 ret
= hclge_init_fd_config(hdev
);
11841 dev_err(&pdev
->dev
,
11842 "fd table init fail, ret=%d\n", ret
);
11843 goto err_mdiobus_unreg
;
11846 ret
= hclge_ptp_init(hdev
);
11848 goto err_mdiobus_unreg
;
11850 ret
= hclge_update_port_info(hdev
);
11852 goto err_ptp_uninit
;
11854 INIT_KFIFO(hdev
->mac_tnl_log
);
11856 hclge_dcb_ops_set(hdev
);
11858 timer_setup(&hdev
->reset_timer
, hclge_reset_timer
, 0);
11859 INIT_DELAYED_WORK(&hdev
->service_task
, hclge_service_task
);
11861 hclge_clear_all_event_cause(hdev
);
11862 hclge_clear_resetting_state(hdev
);
	/* Log and clear the hw errors that have already occurred */
11865 if (hnae3_dev_ras_imp_supported(hdev
))
11866 hclge_handle_occurred_error(hdev
);
11868 hclge_handle_all_hns_hw_errors(ae_dev
);
	/* Request a delayed reset for the error recovery, because an
	 * immediate global reset on this PF may affect the pending
	 * initialization of other PFs.
11873 if (ae_dev
->hw_err_reset_req
) {
11874 enum hnae3_reset_type reset_level
;
11876 reset_level
= hclge_get_reset_level(ae_dev
,
11877 &ae_dev
->hw_err_reset_req
);
11878 hclge_set_def_reset_request(ae_dev
, reset_level
);
11879 mod_timer(&hdev
->reset_timer
, jiffies
+ HCLGE_RESET_INTERVAL
);
11882 hclge_init_rxd_adv_layout(hdev
);
11884 /* Enable MISC vector(vector0) */
11885 hclge_enable_vector(&hdev
->misc_vector
, true);
11887 ret
= hclge_init_wol(hdev
);
11889 dev_warn(&pdev
->dev
,
11890 "failed to wake on lan init, ret = %d\n", ret
);
11892 ret
= hclge_devlink_init(hdev
);
11894 goto err_ptp_uninit
;
11896 hclge_state_init(hdev
);
11897 hdev
->last_reset_time
= jiffies
;
11899 dev_info(&hdev
->pdev
->dev
, "%s driver initialization finished.\n",
11900 HCLGE_DRIVER_NAME
);
11902 hclge_task_schedule(hdev
, round_jiffies_relative(HZ
));
11906 hclge_ptp_uninit(hdev
);
11908 if (hdev
->hw
.mac
.phydev
)
11909 mdiobus_unregister(hdev
->hw
.mac
.mdio_bus
);
11910 err_msi_irq_uninit
:
11911 hclge_misc_irq_uninit(hdev
);
11913 pci_free_irq_vectors(pdev
);
11915 hclge_comm_cmd_uninit(hdev
->ae_dev
, &hdev
->hw
.hw
);
11917 pcim_iounmap(pdev
, hdev
->hw
.hw
.io_base
);
11918 pci_release_regions(pdev
);
11919 pci_disable_device(pdev
);
11921 mutex_destroy(&hdev
->vport_lock
);
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
	memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}
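/* VLAN spoof checking is implemented through the ingress VF VLAN filter. */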
11937 static int hclge_set_vlan_spoofchk(struct hclge_dev
*hdev
, int vf
, bool enable
)
11939 return hclge_set_vlan_filter_ctrl(hdev
, HCLGE_FILTER_TYPE_VF
,
11940 HCLGE_FILTER_FE_NIC_INGRESS_B
,
11944 static int hclge_set_vf_spoofchk_hw(struct hclge_dev
*hdev
, int vf
, bool enable
)
11948 ret
= hclge_set_mac_spoofchk(hdev
, vf
, enable
);
11950 dev_err(&hdev
->pdev
->dev
,
11951 "Set vf %d mac spoof check %s failed, ret=%d\n",
11952 vf
, enable
? "on" : "off", ret
);
11956 ret
= hclge_set_vlan_spoofchk(hdev
, vf
, enable
);
11958 dev_err(&hdev
->pdev
->dev
,
11959 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11960 vf
, enable
? "on" : "off", ret
);
11965 static int hclge_set_vf_spoofchk(struct hnae3_handle
*handle
, int vf
,
11968 struct hclge_vport
*vport
= hclge_get_vport(handle
);
11969 struct hclge_dev
*hdev
= vport
->back
;
11970 u32 new_spoofchk
= enable
? 1 : 0;
11973 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
11974 return -EOPNOTSUPP
;
11976 vport
= hclge_get_vf_vport(hdev
, vf
);
11980 if (vport
->vf_info
.spoofchk
== new_spoofchk
)
11983 if (enable
&& test_bit(vport
->vport_id
, hdev
->vf_vlan_full
))
11984 dev_warn(&hdev
->pdev
->dev
,
11985 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11987 else if (enable
&& hclge_is_umv_space_full(vport
, true))
11988 dev_warn(&hdev
->pdev
->dev
,
11989 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11992 ret
= hclge_set_vf_spoofchk_hw(hdev
, vport
->vport_id
, enable
);
11996 vport
->vf_info
.spoofchk
= new_spoofchk
;
12000 static int hclge_reset_vport_spoofchk(struct hclge_dev
*hdev
)
12002 struct hclge_vport
*vport
= hdev
->vport
;
12006 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
12009 /* resume the vf spoof check state after reset */
12010 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
12011 ret
= hclge_set_vf_spoofchk_hw(hdev
, vport
->vport_id
,
12012 vport
->vf_info
.spoofchk
);
12022 static int hclge_set_vf_trust(struct hnae3_handle
*handle
, int vf
, bool enable
)
12024 struct hclge_vport
*vport
= hclge_get_vport(handle
);
12025 struct hclge_dev
*hdev
= vport
->back
;
12026 u32 new_trusted
= enable
? 1 : 0;
12028 vport
= hclge_get_vf_vport(hdev
, vf
);
12032 if (vport
->vf_info
.trusted
== new_trusted
)
12035 vport
->vf_info
.trusted
= new_trusted
;
12036 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE
, &vport
->state
);
12037 hclge_task_schedule(hdev
, 0);
12042 static void hclge_reset_vf_rate(struct hclge_dev
*hdev
)
12047 /* reset vf rate to default value */
12048 for (vf
= HCLGE_VF_VPORT_START_NUM
; vf
< hdev
->num_alloc_vport
; vf
++) {
12049 struct hclge_vport
*vport
= &hdev
->vport
[vf
];
12051 vport
->vf_info
.max_tx_rate
= 0;
12052 ret
= hclge_tm_qs_shaper_cfg(vport
, vport
->vf_info
.max_tx_rate
);
12054 dev_err(&hdev
->pdev
->dev
,
12055 "vf%d failed to reset to default, ret=%d\n",
12056 vf
- HCLGE_VF_VPORT_START_NUM
, ret
);
12060 static int hclge_vf_rate_param_check(struct hclge_dev
*hdev
,
12061 int min_tx_rate
, int max_tx_rate
)
12063 if (min_tx_rate
!= 0 ||
12064 max_tx_rate
< 0 || max_tx_rate
> hdev
->hw
.mac
.max_speed
) {
12065 dev_err(&hdev
->pdev
->dev
,
12066 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
12067 min_tx_rate
, max_tx_rate
, hdev
->hw
.mac
.max_speed
);
12074 static int hclge_set_vf_rate(struct hnae3_handle
*handle
, int vf
,
12075 int min_tx_rate
, int max_tx_rate
, bool force
)
12077 struct hclge_vport
*vport
= hclge_get_vport(handle
);
12078 struct hclge_dev
*hdev
= vport
->back
;
12081 ret
= hclge_vf_rate_param_check(hdev
, min_tx_rate
, max_tx_rate
);
12085 vport
= hclge_get_vf_vport(hdev
, vf
);
12089 if (!force
&& max_tx_rate
== vport
->vf_info
.max_tx_rate
)
12092 ret
= hclge_tm_qs_shaper_cfg(vport
, max_tx_rate
);
12096 vport
->vf_info
.max_tx_rate
= max_tx_rate
;
12101 static int hclge_resume_vf_rate(struct hclge_dev
*hdev
)
12103 struct hnae3_handle
*handle
= &hdev
->vport
->nic
;
12104 struct hclge_vport
*vport
;
12108 /* resume the vf max_tx_rate after reset */
12109 for (vf
= 0; vf
< pci_num_vf(hdev
->pdev
); vf
++) {
12110 vport
= hclge_get_vf_vport(hdev
, vf
);
		/* zero means max rate; after reset the firmware has already
		 * set it to max rate, so just continue.
12117 if (!vport
->vf_info
.max_tx_rate
)
12120 ret
= hclge_set_vf_rate(handle
, vf
, 0,
12121 vport
->vf_info
.max_tx_rate
, true);
12123 dev_err(&hdev
->pdev
->dev
,
12124 "vf%d failed to resume tx_rate:%u, ret=%d\n",
12125 vf
, vport
->vf_info
.max_tx_rate
, ret
);
12133 static void hclge_reset_vport_state(struct hclge_dev
*hdev
)
12135 struct hclge_vport
*vport
= hdev
->vport
;
12138 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
12139 clear_bit(HCLGE_VPORT_STATE_ALIVE
, &vport
->state
);
12144 static int hclge_reset_ae_dev(struct hnae3_ae_dev
*ae_dev
)
12146 struct hclge_dev
*hdev
= ae_dev
->priv
;
12147 struct pci_dev
*pdev
= ae_dev
->pdev
;
12150 set_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
12152 hclge_stats_clear(hdev
);
	/* NOTE: a pf reset does not need to clear or restore the pf and vf
	 * table entries, so the tables in memory should not be cleaned here.
12156 if (hdev
->reset_type
== HNAE3_IMP_RESET
||
12157 hdev
->reset_type
== HNAE3_GLOBAL_RESET
) {
12158 memset(hdev
->vlan_table
, 0, sizeof(hdev
->vlan_table
));
12159 memset(hdev
->vf_vlan_full
, 0, sizeof(hdev
->vf_vlan_full
));
12160 bitmap_set(hdev
->vport_config_block
, 0, hdev
->num_alloc_vport
);
12161 hclge_reset_umv_space(hdev
);
12164 ret
= hclge_comm_cmd_init(hdev
->ae_dev
, &hdev
->hw
.hw
, &hdev
->fw_version
,
12165 true, hdev
->reset_pending
);
12167 dev_err(&pdev
->dev
, "Cmd queue init failed\n");
12171 ret
= hclge_map_tqp(hdev
);
12173 dev_err(&pdev
->dev
, "Map tqp error, ret = %d.\n", ret
);
12177 ret
= hclge_mac_init(hdev
);
12179 dev_err(&pdev
->dev
, "Mac init error, ret = %d\n", ret
);
12183 ret
= hclge_tp_port_init(hdev
);
12185 dev_err(&pdev
->dev
, "failed to init tp port, ret = %d\n",
12190 ret
= hclge_config_tso(hdev
, HCLGE_TSO_MSS_MIN
, HCLGE_TSO_MSS_MAX
);
12192 dev_err(&pdev
->dev
, "Enable tso fail, ret =%d\n", ret
);
12196 ret
= hclge_config_gro(hdev
);
12200 ret
= hclge_init_vlan_config(hdev
);
12202 dev_err(&pdev
->dev
, "VLAN init fail, ret =%d\n", ret
);
12206 hclge_reset_tc_config(hdev
);
12208 ret
= hclge_tm_init_hw(hdev
, true);
12210 dev_err(&pdev
->dev
, "tm init hw fail, ret =%d\n", ret
);
12214 ret
= hclge_rss_init_hw(hdev
);
12216 dev_err(&pdev
->dev
, "Rss init fail, ret =%d\n", ret
);
12220 ret
= init_mgr_tbl(hdev
);
12222 dev_err(&pdev
->dev
,
12223 "failed to reinit manager table, ret = %d\n", ret
);
12227 ret
= hclge_init_fd_config(hdev
);
12229 dev_err(&pdev
->dev
, "fd table init fail, ret=%d\n", ret
);
12233 ret
= hclge_ptp_init(hdev
);
	/* Log and clear the hw errors that have already occurred */
12238 if (hnae3_dev_ras_imp_supported(hdev
))
12239 hclge_handle_occurred_error(hdev
);
12241 hclge_handle_all_hns_hw_errors(ae_dev
);
12243 /* Re-enable the hw error interrupts because
12244 * the interrupts get disabled on global reset.
12246 ret
= hclge_config_nic_hw_error(hdev
, true);
12248 dev_err(&pdev
->dev
,
12249 "fail(%d) to re-enable NIC hw error interrupts\n",
12254 if (hdev
->roce_client
) {
12255 ret
= hclge_config_rocee_ras_interrupt(hdev
, true);
12257 dev_err(&pdev
->dev
,
12258 "fail(%d) to re-enable roce ras interrupts\n",
12264 hclge_reset_vport_state(hdev
);
12265 ret
= hclge_reset_vport_spoofchk(hdev
);
12269 ret
= hclge_resume_vf_rate(hdev
);
12273 hclge_init_rxd_adv_layout(hdev
);
12275 ret
= hclge_update_wol(hdev
);
12277 dev_warn(&pdev
->dev
,
12278 "failed to update wol config, ret = %d\n", ret
);
12280 dev_info(&pdev
->dev
, "Reset done, %s driver initialization finished.\n",
12281 HCLGE_DRIVER_NAME
);
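/* Undo the initialization done in hclge_init_ae_dev(): stop the service
 * task, disable interrupts, release the MAC/FD tables and free the PCI
 * resources.
 */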
12286 static void hclge_uninit_ae_dev(struct hnae3_ae_dev
*ae_dev
)
12288 struct hclge_dev
*hdev
= ae_dev
->priv
;
12289 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
12291 hclge_reset_vf_rate(hdev
);
12292 hclge_clear_vf_vlan(hdev
);
12293 hclge_state_uninit(hdev
);
12294 hclge_ptp_uninit(hdev
);
12295 hclge_uninit_rxd_adv_layout(hdev
);
12296 hclge_uninit_mac_table(hdev
);
12297 hclge_del_all_fd_entries(hdev
);
12300 mdiobus_unregister(mac
->mdio_bus
);
12302 /* Disable MISC vector(vector0) */
12303 hclge_enable_vector(&hdev
->misc_vector
, false);
12304 synchronize_irq(hdev
->misc_vector
.vector_irq
);
12306 /* Disable all hw interrupts */
12307 hclge_config_mac_tnl_int(hdev
, false);
12308 hclge_config_nic_hw_error(hdev
, false);
12309 hclge_config_rocee_ras_interrupt(hdev
, false);
12311 hclge_comm_cmd_uninit(hdev
->ae_dev
, &hdev
->hw
.hw
);
12312 hclge_misc_irq_uninit(hdev
);
12313 hclge_devlink_uninit(hdev
);
12314 hclge_pci_uninit(hdev
);
12315 hclge_uninit_vport_vlan_table(hdev
);
12316 mutex_destroy(&hdev
->vport_lock
);
12317 ae_dev
->priv
= NULL
;
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}
12328 static void hclge_get_channels(struct hnae3_handle
*handle
,
12329 struct ethtool_channels
*ch
)
12331 ch
->max_combined
= hclge_get_max_channels(handle
);
12332 ch
->other_count
= 1;
12334 ch
->combined_count
= handle
->kinfo
.rss_size
;
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}
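/* Recompute the per-TC RSS size and queue offset from the current RSS size
 * and program the new TC mode into hardware.
 */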
12347 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle
*handle
)
12349 struct hclge_vport
*vport
= hclge_get_vport(handle
);
12350 u16 tc_offset
[HCLGE_MAX_TC_NUM
] = {0};
12351 struct hclge_dev
*hdev
= vport
->back
;
12352 u16 tc_size
[HCLGE_MAX_TC_NUM
] = {0};
12353 u16 tc_valid
[HCLGE_MAX_TC_NUM
];
12357 roundup_size
= roundup_pow_of_two(vport
->nic
.kinfo
.rss_size
);
12358 roundup_size
= ilog2(roundup_size
);
12359 /* Set the RSS TC mode according to the new RSS size */
12360 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
12363 if (!(hdev
->hw_tc_map
& BIT(i
)))
12367 tc_size
[i
] = roundup_size
;
12368 tc_offset
[i
] = vport
->nic
.kinfo
.rss_size
* i
;
12371 return hclge_comm_set_rss_tc_mode(&hdev
->hw
.hw
, tc_offset
, tc_valid
,
12375 static int hclge_set_channels(struct hnae3_handle
*handle
, u32 new_tqps_num
,
12376 bool rxfh_configured
)
12378 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(handle
->pdev
);
12379 struct hclge_vport
*vport
= hclge_get_vport(handle
);
12380 struct hnae3_knic_private_info
*kinfo
= &vport
->nic
.kinfo
;
12381 struct hclge_dev
*hdev
= vport
->back
;
12382 u16 cur_rss_size
= kinfo
->rss_size
;
12383 u16 cur_tqps
= kinfo
->num_tqps
;
12388 kinfo
->req_rss_size
= new_tqps_num
;
12390 ret
= hclge_tm_vport_map_update(hdev
);
12392 dev_err(&hdev
->pdev
->dev
, "tm vport map fail, ret =%d\n", ret
);
12396 ret
= hclge_set_rss_tc_mode_cfg(handle
);
12400 /* RSS indirection table has been configured by user */
12401 if (rxfh_configured
)
12404 /* Reinitializes the rss indirect table according to the new RSS size */
12405 rss_indir
= kcalloc(ae_dev
->dev_specs
.rss_ind_tbl_size
, sizeof(u32
),
12410 for (i
= 0; i
< ae_dev
->dev_specs
.rss_ind_tbl_size
; i
++)
12411 rss_indir
[i
] = i
% kinfo
->rss_size
;
12413 ret
= hclge_set_rss(handle
, rss_indir
, NULL
, 0);
12415 dev_err(&hdev
->pdev
->dev
, "set rss indir table fail, ret=%d\n",
12422 dev_info(&hdev
->pdev
->dev
,
12423 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12424 cur_rss_size
, kinfo
->rss_size
,
12425 cur_tqps
, kinfo
->rss_size
* kinfo
->tc_info
.num_tc
);
12430 static int hclge_set_led_status(struct hclge_dev
*hdev
, u8 locate_led_status
)
12432 struct hclge_set_led_state_cmd
*req
;
12433 struct hclge_desc desc
;
12436 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_LED_STATUS_CFG
, false);
12438 req
= (struct hclge_set_led_state_cmd
*)desc
.data
;
12439 hnae3_set_field(req
->locate_led_config
, HCLGE_LED_LOCATE_STATE_M
,
12440 HCLGE_LED_LOCATE_STATE_S
, locate_led_status
);
12442 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
12444 dev_err(&hdev
->pdev
->dev
,
12445 "Send set led state cmd error, ret =%d\n", ret
);
12450 enum hclge_led_status
{
12453 HCLGE_LED_NO_CHANGE
= 0xFF,
12456 static int hclge_set_led_id(struct hnae3_handle
*handle
,
12457 enum ethtool_phys_id_state status
)
12459 struct hclge_vport
*vport
= hclge_get_vport(handle
);
12460 struct hclge_dev
*hdev
= vport
->back
;
12463 case ETHTOOL_ID_ACTIVE
:
12464 return hclge_set_led_status(hdev
, HCLGE_LED_ON
);
12465 case ETHTOOL_ID_INACTIVE
:
12466 return hclge_set_led_status(hdev
, HCLGE_LED_OFF
);
12472 static void hclge_get_link_mode(struct hnae3_handle
*handle
,
12473 unsigned long *supported
,
12474 unsigned long *advertising
)
12476 unsigned int size
= BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS
);
12477 struct hclge_vport
*vport
= hclge_get_vport(handle
);
12478 struct hclge_dev
*hdev
= vport
->back
;
12479 unsigned int idx
= 0;
12481 for (; idx
< size
; idx
++) {
12482 supported
[idx
] = hdev
->hw
.mac
.supported
[idx
];
12483 advertising
[idx
] = hdev
->hw
.mac
.advertising
[idx
];
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}
12502 static int hclge_sync_vport_promisc_mode(struct hclge_vport
*vport
)
12504 struct hnae3_handle
*handle
= &vport
->nic
;
12505 struct hclge_dev
*hdev
= vport
->back
;
12506 bool uc_en
= false;
12507 bool mc_en
= false;
12512 if (vport
->last_promisc_flags
!= vport
->overflow_promisc_flags
) {
12513 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE
, &vport
->state
);
12514 vport
->last_promisc_flags
= vport
->overflow_promisc_flags
;
12517 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE
,
12522 if (!vport
->vport_id
) {
12523 tmp_flags
= handle
->netdev_flags
| vport
->last_promisc_flags
;
12524 ret
= hclge_set_promisc_mode(handle
, tmp_flags
& HNAE3_UPE
,
12525 tmp_flags
& HNAE3_MPE
);
12527 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE
,
12530 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE
,
12536 if (vport
->vf_info
.trusted
) {
12537 uc_en
= vport
->vf_info
.request_uc_en
> 0 ||
12538 vport
->overflow_promisc_flags
& HNAE3_OVERFLOW_UPE
;
12539 mc_en
= vport
->vf_info
.request_mc_en
> 0 ||
12540 vport
->overflow_promisc_flags
& HNAE3_OVERFLOW_MPE
;
12542 bc_en
= vport
->vf_info
.request_bc_en
> 0;
12544 ret
= hclge_cmd_set_promisc_mode(hdev
, vport
->vport_id
, uc_en
,
12547 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE
, &vport
->state
);
12550 hclge_set_vport_vlan_fltr_change(vport
);
12555 static void hclge_sync_promisc_mode(struct hclge_dev
*hdev
)
12557 struct hclge_vport
*vport
;
12561 for (i
= 0; i
< hdev
->num_alloc_vport
; i
++) {
12562 vport
= &hdev
->vport
[i
];
12564 ret
= hclge_sync_vport_promisc_mode(vport
);
12570 static bool hclge_module_existed(struct hclge_dev
*hdev
)
12572 struct hclge_desc desc
;
12576 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_GET_SFP_EXIST
, true);
12577 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
12579 dev_err(&hdev
->pdev
->dev
,
12580 "failed to get SFP exist state, ret = %d\n", ret
);
12584 existed
= le32_to_cpu(desc
.data
[0]);
12586 return existed
!= 0;
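/* The helper below is used by hclge_get_module_eeprom() to fetch the
 * module eeprom contents chunk by chunk.
 */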
/* need 6 bds (140 bytes in total) in one reading;
 * return the number of bytes actually read, 0 means the read failed.
 */
12592 static u16
hclge_get_sfp_eeprom_info(struct hclge_dev
*hdev
, u32 offset
,
12595 struct hclge_desc desc
[HCLGE_SFP_INFO_CMD_NUM
];
12596 struct hclge_sfp_info_bd0_cmd
*sfp_info_bd0
;
12602 /* setup all 6 bds to read module eeprom info. */
12603 for (i
= 0; i
< HCLGE_SFP_INFO_CMD_NUM
; i
++) {
12604 hclge_cmd_setup_basic_desc(&desc
[i
], HCLGE_OPC_GET_SFP_EEPROM
,
12607 /* bd0~bd4 need next flag */
12608 if (i
< HCLGE_SFP_INFO_CMD_NUM
- 1)
12609 desc
[i
].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
12612 /* setup bd0, this bd contains offset and read length. */
12613 sfp_info_bd0
= (struct hclge_sfp_info_bd0_cmd
*)desc
[0].data
;
12614 sfp_info_bd0
->offset
= cpu_to_le16((u16
)offset
);
12615 read_len
= min_t(u16
, len
, HCLGE_SFP_INFO_MAX_LEN
);
12616 sfp_info_bd0
->read_len
= cpu_to_le16(read_len
);
12618 ret
= hclge_cmd_send(&hdev
->hw
, desc
, i
);
12620 dev_err(&hdev
->pdev
->dev
,
12621 "failed to get SFP eeprom info, ret = %d\n", ret
);
12625 /* copy sfp info from bd0 to out buffer. */
12626 copy_len
= min_t(u16
, len
, HCLGE_SFP_INFO_BD0_LEN
);
12627 memcpy(data
, sfp_info_bd0
->data
, copy_len
);
12628 read_len
= copy_len
;
12630 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12631 for (i
= 1; i
< HCLGE_SFP_INFO_CMD_NUM
; i
++) {
12632 if (read_len
>= len
)
12635 copy_len
= min_t(u16
, len
- read_len
, HCLGE_SFP_INFO_BDX_LEN
);
12636 memcpy(data
+ read_len
, desc
[i
].data
, copy_len
);
12637 read_len
+= copy_len
;
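
/* Read module EEPROM contents for ethtool -m. Only fiber ports are
 * supported; the data is fetched in chunks of at most
 * HCLGE_SFP_INFO_MAX_LEN bytes via hclge_get_sfp_eeprom_info().
 */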
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}
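
/* Query the firmware for a link diagnosis status code (only devices
 * newer than V2 support this); the raw 32-bit code from the command
 * response is handed back to the caller for decoding.
 */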
static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}
/* After SRIOV is disabled, the VF still has some config and info that
 * was set up by the PF and needs to be cleaned.
 */
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
	vport->mps = 0;

	/* after disable sriov, clean VF rate configured by PF */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}
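
/* Clean up the PF-side state of every VF vport when SRIOV is disabled.
 * VF vports start at index HCLGE_VF_VPORT_START_NUM in hdev->vport.
 */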
static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}
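
/* Report the DSCP-to-priority mapping mode and the priority mapped to
 * a given DSCP value; an unmapped DSCP (HNAE3_PRIO_ID_INVALID) is
 * reported as priority 0.
 */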
static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
			       u8 *priority)
{
	struct hclge_vport *vport = hclge_get_vport(h);

	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	if (tc_mode)
		*tc_mode = vport->nic.kinfo.tc_map_mode;
	if (priority)
		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
			    vport->nic.kinfo.dscp_prio[dscp];

	return 0;
}
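
/* Callback table exported to the hnae3 framework; the hns3 netdev and
 * ethtool layers reach the PF hardware through these ops.
 */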
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec_stats = hclge_get_fec_stats,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
	.get_dscp_prio = hclge_get_dscp_prio,
	.get_wol = hclge_get_wol,
	.set_wol = hclge_set_wol,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
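
/* Module init: allocate the driver-wide unbound workqueue and register
 * the PF algorithm (ae_algo) with the hnae3 framework; hclge_exit()
 * undoes both.
 */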
static int __init hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void __exit hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);