// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclgevf_regs.h"
#include "hclge_mbx.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
#include "hclgevf_trace.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				   int num, bool is_special)
{
	int i;

	trace_hclge_vf_cmd_send(hw, desc, 0, num);

	if (!is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
}

static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				  int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_vf_cmd_get(hw, desc, 0, num);

	if (!is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
}

static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
	.trace_cmd_send = hclgevf_trace_cmd_send,
	.trace_cmd_get = hclgevf_trace_cmd_get,
};

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

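/* resolve a hnae3 handle back to its owning hclgevf_dev; the handle may be
 * either the nic handle or the roce handle embedded in the device structure.
 */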
struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static void hclgevf_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclge_comm_tqps_get_sset_count(handle);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	if (strset == ETH_SS_STATS)
		hclge_comm_tqps_get_strings(handle, data);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclge_comm_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
	caps = le32_to_cpu(basic_info->pf_caps);
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6

	struct hclge_mbx_vf_queue_info *queue_info;
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4

	struct hclge_mbx_vf_queue_depth *queue_depth;
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
		    MAX_NUMNODES);
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGE_COMM_RSS_KEY_SIZE);
		}
	}

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
						  hfunc);
		if (ret)
			return ret;
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

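/* map (or unmap) the rings of a ring chain to an interrupt vector by sending
 * the chain to the PF in batches of HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM rings
 * per mailbox message.
 */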
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
					hnae3_get_field(node->int_gl_idx,
							HNAE3_RING_GL_IDX_M,
							HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

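/* send both the requested new MAC and the currently used MAC to the PF in a
 * single mailbox message, so the PF can drop the old entry and install the
 * new one; the old address is zeroed on the first set when the PF did not
 * assign a MAC.
 */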
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

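/* update the state of an existing mac node according to the newly requested
 * state; a node that was only queued for adding and is now requested for
 * deletion is simply dropped from the list.
 */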
static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

*handle
,
995 const unsigned char *addr
)
997 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_ADD
,
998 HCLGEVF_MAC_ADDR_UC
, addr
);
1001 static int hclgevf_rm_uc_addr(struct hnae3_handle
*handle
,
1002 const unsigned char *addr
)
1004 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_DEL
,
1005 HCLGEVF_MAC_ADDR_UC
, addr
);
1008 static int hclgevf_add_mc_addr(struct hnae3_handle
*handle
,
1009 const unsigned char *addr
)
1011 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_ADD
,
1012 HCLGEVF_MAC_ADDR_MC
, addr
);
1015 static int hclgevf_rm_mc_addr(struct hnae3_handle
*handle
,
1016 const unsigned char *addr
)
1018 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_DEL
,
1019 HCLGEVF_MAC_ADDR_MC
, addr
);
static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means have received a TO_DEL request
		 * during the time window of sending mac config request to PF
		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
		 * then it will be removed at next time. If is TO_ADD, it means
		 * send TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr is exist in the mac list, it means
			 * received a new request TO_ADD during the time window
			 * of sending mac addr configure request to PF, so just
			 * change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

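/* synchronize one mac address list (unicast or multicast) with the PF: split
 * the pending entries into temporary add/del lists under the lock, issue the
 * mailbox requests outside the lock, then merge the results back.
 */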
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_mbx_vlan_filter *vlan_filter;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When device is resetting or reset failed, firmware is unable to
	 * handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
	vlan_filter->is_kill = is_kill;
	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));

	/* when remove hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistence
	 * with stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
		return;

	rtnl_lock();
	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			break;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			break;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
	rtnl_unlock();
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		*(__le16 *)send_msg.data = cpu_to_le16(i);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_mbx_mtu_info *mtu_info;
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
	mtu_info->mtu = cpu_to_le32(new_mtu);

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let reset of the stack to complete. This
	 * might happen in case reset assertion was made by PF. Yes, this also
	 * means we might end up waiting bit more even for VF reset.
	 */
	if (hdev->reset_type == HNAE3_VF_FULL_RESET)
		msleep(5000);
	else
		msleep(500);

	return 0;
}

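/* set or clear the software reset ready bit used to handshake reset state
 * with IMP firmware via the CSQ depth register.
 */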
static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

*hdev
)
1511 dev_info(&hdev
->pdev
->dev
, "VF function reset count: %u\n",
1512 hdev
->rst_stats
.vf_func_rst_cnt
);
1513 dev_info(&hdev
->pdev
->dev
, "FLR reset count: %u\n",
1514 hdev
->rst_stats
.flr_rst_cnt
);
1515 dev_info(&hdev
->pdev
->dev
, "VF reset count: %u\n",
1516 hdev
->rst_stats
.vf_rst_cnt
);
1517 dev_info(&hdev
->pdev
->dev
, "reset done count: %u\n",
1518 hdev
->rst_stats
.rst_done_cnt
);
1519 dev_info(&hdev
->pdev
->dev
, "HW reset done count: %u\n",
1520 hdev
->rst_stats
.hw_rst_done_cnt
);
1521 dev_info(&hdev
->pdev
->dev
, "reset count: %u\n",
1522 hdev
->rst_stats
.rst_cnt
);
1523 dev_info(&hdev
->pdev
->dev
, "reset fail count: %u\n",
1524 hdev
->rst_stats
.rst_fail_cnt
);
1525 dev_info(&hdev
->pdev
->dev
, "vector0 interrupt enable status: 0x%x\n",
1526 hclgevf_read_dev(&hdev
->hw
, HCLGEVF_MISC_VECTOR_REG_BASE
));
1527 dev_info(&hdev
->pdev
->dev
, "vector0 interrupt status: 0x%x\n",
1528 hclgevf_read_dev(&hdev
->hw
, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG
));
1529 dev_info(&hdev
->pdev
->dev
, "handshake status: 0x%x\n",
1530 hclgevf_read_dev(&hdev
->hw
, HCLGE_COMM_NIC_CSQ_DEPTH_REG
));
1531 dev_info(&hdev
->pdev
->dev
, "function reset status: 0x%x\n",
1532 hclgevf_read_dev(&hdev
->hw
, HCLGEVF_RST_ING
));
1533 dev_info(&hdev
->pdev
->dev
, "hdev state: 0x%lx\n", hdev
->state
);
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;
	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
#define HCLGEVF_RESET_RETRY_WAIT_MS	500
#define HCLGEVF_RESET_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclgevf_reset_prepare(hdev);
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclgevf_enable_vector(&hdev->misc_vector, false);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		up(&hdev->reset_sem);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		hdev->reset_type =
			hclgevf_get_reset_level(&hdev->reset_pending);
		if (hdev->reset_type != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 *
		 * if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
		return;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);

	/* VF does not need to request link status when this bit is set, because
	 * PF will push its link status to VFs when link status changed.
	 */
	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
		hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
}

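/* classify the vector0 interrupt source (reset, mailbox or unknown) from the
 * CMDQ state register and compute the value used to clear the event.
 */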
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status, its PF will clear
		 * this status when PF has initialized done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_reset_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);

	hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
	hclgevf_reset_task_schedule(hdev);
}

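/* misc (vector0) interrupt handler: disable the vector, decode the event and
 * either schedule reset handling or process mailbox messages, then re-enable
 * the vector.
 */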
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
#define HCLGEVF_RESET_DELAY	5

	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
		hclgevf_clear_event_cause(hdev, clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		mod_timer(&hdev->reset_timer,
			  jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->gro_en = true;

	ret = hclgevf_get_basic_info(hdev);
	if (ret)
		return ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	return hclgevf_get_pf_media_type(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_msix_offset;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
		    MAX_NUMNODES);

	return 0;
}
static int hclgevf_config_gro(struct hclgevf_dev *hdev)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
						  rss_cfg->rss_algo,
						  rss_cfg->rss_hash_key);
		if (ret)
			return ret;
	}

	ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					     rss_cfg->rss_indirection_tbl);
	if (ret)
		return ret;

	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
				   tc_offset, tc_valid, tc_size);

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
					  tc_valid, tc_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to enable rx vlan offload, ret = %d\n", ret);
		return ret;
	}

	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}
static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
		hclgevf_flush_link_update(hdev);
	}
}
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);

	hclge_comm_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	return 0;
}
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		hclgevf_reset_tqp(handle);

	hclge_comm_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_client_start(struct hnae3_handle *handle)
{
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
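/* Allocate MSI/MSI-X vectors for the VF. When RoCE is supported, the NIC
 * and RoCE vectors come from one MSI-X allocation starting past the fixed
 * RoCE base offset; otherwise plain MSI is also acceptable.
 */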
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}
static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}
static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}
static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.rst_cnt;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.rst_cnt) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		return -EBUSY;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}
static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}
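/* Register a NIC or RoCE client against this VF. Registering a KNIC client
 * also initializes an already-attached RoCE client; failures roll back the
 * client pointers that were just set.
 */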
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGEVF_WAIT_RESET_DONE);
		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);

		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGEVF_WAIT_RESET_DONE);
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw = &hdev->hw;

	/* for device does not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
		return 0;

	hw->hw.mem_base =
		devm_ioremap_wc(&pdev->dev,
				pci_resource_start(pdev, HCLGEVF_MEM_BAR),
				pci_resource_len(pdev, HCLGEVF_MEM_BAR));
	if (!hw->hw.mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hw.io_base = pci_iomap(pdev, 2, 0);
	if (!hw->hw.io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_release_regions;
	}

	ret = hclgevf_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	return 0;

err_unmap_io_base:
	pci_iounmap(pdev, hdev->hw.hw.io_base);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pci_iounmap(pdev, hdev->hw.hw.io_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
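/* Query interrupt vector resources from firmware and derive how many NIC
 * and RoCE MSI-X vectors this VF may use; fail if fewer than the minimum
 * needed by the NIC are available.
 */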
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* nic's msix numbers is always equals to the roce's. */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}
static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_NON_TSO_BD_NUM			8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num =
					HCLGEVF_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
				    struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclgevf_dev_specs_0_cmd *req0;
	struct hclgevf_dev_specs_1_cmd *req1;

	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
					le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
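/* Query device specifications from firmware on V3 and newer devices; older
 * devices fall back to the driver defaults, and any zero field returned by
 * firmware is backfilled with a default.
 */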
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclgevf_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclgevf_cmd_setup_basic_desc(&desc[i],
					     HCLGE_OPC_QUERY_DEV_SPECS, true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclgevf_parse_dev_specs(hdev, desc);
	hclgevf_check_dev_specs(hdev);

	return 0;
}
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
	     hdev->reset_type == HNAE3_FLR_RESET) &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}
static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
			       HCLGE_MBX_VPORT_LIST_CLEAR);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
}
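/* Re-initialize the hardware after a VF reset: redo the PCI/IRQ setup if
 * needed, bring the command queue back up, and restore RSS, GRO and VLAN
 * configuration.
 */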
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	hclgevf_arq_init(hdev);

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
				  &hdev->fw_version, false,
				  hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

	hclgevf_init_rxd_adv_layout(hdev);

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}
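/* Full one-time initialization of the VF device: PCI and command queue
 * setup, resource and spec query, MSI-X and misc IRQ setup, then the
 * initial RSS/VLAN/GRO configuration, with rollback on any failure.
 */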
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_cmd_queue_init;

	hclgevf_arq_init(hdev);

	hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
				  &hdev->fw_version, false,
				  hdev->reset_pending);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to query dev specifications, ret = %d\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
				      &hdev->rss_cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_config;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure vf tbl list as empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hclgevf_init_rxd_adv_layout(hdev);

	ret = hclgevf_devlink_init(hdev);
	if (ret)
		goto err_config;

	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);
	hclgevf_uninit_rxd_adv_layout(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclgevf_devlink_uninit(hdev);
	hclgevf_pci_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->tc_info.num_tc);
}
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->tc_info.num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
}
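/* Apply a new channel (queue pair) count from ethtool: recompute the RSS
 * size, reprogram the TC mode and, unless the user already configured the
 * RSS indirection table, rebuild it for the new queue count.
 */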
static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
				   tc_offset, tc_valid, tc_size);
	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
					 tc_valid, tc_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
			    sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}
static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex, u32 *lane_num)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}
static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclgevf_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}
static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}
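/* Update the port based VLAN state: skip if a reset is in progress, notify
 * the client down, send the new VLAN configuration to the PF, record the
 * state on the nic handle, then bring the client back up.
 */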
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					struct hclge_mbx_port_base_vlan *port_base_vlan)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait update port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.reset_prepare = hclgevf_reset_prepare_general,
	.reset_done = hclgevf_reset_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_vlan_filter = hclgevf_enable_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
	.get_cmdq_stat = hclgevf_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int __init hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void __exit hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);