// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);
static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};
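
/* Resolve the hclgevf_dev that owns a hnae3 handle. The handle may belong to
 * either the NIC client or the RoCE client, so pick the matching container.
 */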
static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
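
/* Query the per-queue RX/TX packet counters from firmware over the command
 * queue and accumulate them into the software tqp statistics.
 */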
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}
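
/* Copy the accumulated per-queue TX counters, then the RX counters, into the
 * caller-supplied ethtool statistics buffer.
 */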
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}
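
/* Emit one "txqN_pktnum_rcd" string per TX queue followed by one
 * "rxqN_pktnum_rcd" string per RX queue for ethtool -S.
 */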
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}
static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}
static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}
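
/* Zero-initialize a VF-to-PF mailbox message and fill in its opcode and
 * subcode before it is sent to the PF.
 */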
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
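
/* Ask the PF for the current port based VLAN state and cache it in the NIC
 * handle.
 */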
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}
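
/* Fetch the number of task queue pairs, the maximum RSS size and the RX
 * buffer length that the PF has assigned to this VF.
 */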
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}
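
/* Fetch the TX and RX descriptor ring depths assigned to this VF by the PF. */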
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}
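
/* Translate a local VF queue id into the global queue id used by the PF. */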
static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}
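
/* Query the physical port media and module type from the PF. */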
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}
389 static int hclgevf_alloc_tqps(struct hclgevf_dev
*hdev
)
391 struct hclgevf_tqp
*tqp
;
394 hdev
->htqp
= devm_kcalloc(&hdev
->pdev
->dev
, hdev
->num_tqps
,
395 sizeof(struct hclgevf_tqp
), GFP_KERNEL
);
401 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
402 tqp
->dev
= &hdev
->pdev
->dev
;
405 tqp
->q
.ae_algo
= &ae_algovf
;
406 tqp
->q
.buf_size
= hdev
->rx_buf_len
;
407 tqp
->q
.tx_desc_num
= hdev
->num_tx_desc
;
408 tqp
->q
.rx_desc_num
= hdev
->num_rx_desc
;
410 /* need an extended offset to configure queues >=
411 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
413 if (i
< HCLGEVF_TQP_MAX_SIZE_DEV_V2
)
414 tqp
->q
.io_base
= hdev
->hw
.io_base
+
415 HCLGEVF_TQP_REG_OFFSET
+
416 i
* HCLGEVF_TQP_REG_SIZE
;
418 tqp
->q
.io_base
= hdev
->hw
.io_base
+
419 HCLGEVF_TQP_REG_OFFSET
+
420 HCLGEVF_TQP_EXT_REG_OFFSET
+
421 (i
- HCLGEVF_TQP_MAX_SIZE_DEV_V2
) *
422 HCLGEVF_TQP_REG_SIZE
;
430 static int hclgevf_knic_setup(struct hclgevf_dev
*hdev
)
432 struct hnae3_handle
*nic
= &hdev
->nic
;
433 struct hnae3_knic_private_info
*kinfo
;
434 u16 new_tqps
= hdev
->num_tqps
;
439 kinfo
->num_tx_desc
= hdev
->num_tx_desc
;
440 kinfo
->num_rx_desc
= hdev
->num_rx_desc
;
441 kinfo
->rx_buf_len
= hdev
->rx_buf_len
;
442 for (i
= 0; i
< HCLGEVF_MAX_TC_NUM
; i
++)
443 if (hdev
->hw_tc_map
& BIT(i
))
446 num_tc
= num_tc
? num_tc
: 1;
447 kinfo
->tc_info
.num_tc
= num_tc
;
448 kinfo
->rss_size
= min_t(u16
, hdev
->rss_size_max
, new_tqps
/ num_tc
);
449 new_tqps
= kinfo
->rss_size
* num_tc
;
450 kinfo
->num_tqps
= min(new_tqps
, hdev
->num_tqps
);
452 kinfo
->tqp
= devm_kcalloc(&hdev
->pdev
->dev
, kinfo
->num_tqps
,
453 sizeof(struct hnae3_queue
*), GFP_KERNEL
);
457 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
458 hdev
->htqp
[i
].q
.handle
= &hdev
->nic
;
459 hdev
->htqp
[i
].q
.tqp_index
= i
;
460 kinfo
->tqp
[i
] = &hdev
->htqp
[i
].q
;
463 /* after init the max rss_size and tqps, adjust the default tqp numbers
464 * and rss size with the actual vector numbers
466 kinfo
->num_tqps
= min_t(u16
, hdev
->num_nic_msix
- 1, kinfo
->num_tqps
);
467 kinfo
->rss_size
= min_t(u16
, kinfo
->num_tqps
/ num_tc
,
473 static void hclgevf_request_link_info(struct hclgevf_dev
*hdev
)
475 struct hclge_vf_to_pf_msg send_msg
;
478 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_LINK_STATUS
, 0);
479 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
481 dev_err(&hdev
->pdev
->dev
,
482 "VF failed to fetch link status(%d) from PF", status
);
485 void hclgevf_update_link_status(struct hclgevf_dev
*hdev
, int link_state
)
487 struct hnae3_handle
*rhandle
= &hdev
->roce
;
488 struct hnae3_handle
*handle
= &hdev
->nic
;
489 struct hnae3_client
*rclient
;
490 struct hnae3_client
*client
;
492 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING
, &hdev
->state
))
495 client
= handle
->client
;
496 rclient
= hdev
->roce_client
;
499 test_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
) ? 0 : link_state
;
501 if (link_state
!= hdev
->hw
.mac
.link
) {
502 client
->ops
->link_status_change(handle
, !!link_state
);
503 if (rclient
&& rclient
->ops
->link_status_change
)
504 rclient
->ops
->link_status_change(rhandle
, !!link_state
);
505 hdev
->hw
.mac
.link
= link_state
;
508 clear_bit(HCLGEVF_STATE_LINK_UPDATING
, &hdev
->state
);
511 static void hclgevf_update_link_mode(struct hclgevf_dev
*hdev
)
513 #define HCLGEVF_ADVERTISING 0
514 #define HCLGEVF_SUPPORTED 1
516 struct hclge_vf_to_pf_msg send_msg
;
518 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_LINK_MODE
, 0);
519 send_msg
.data
[0] = HCLGEVF_ADVERTISING
;
520 hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
521 send_msg
.data
[0] = HCLGEVF_SUPPORTED
;
522 hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
525 static int hclgevf_set_handle_info(struct hclgevf_dev
*hdev
)
527 struct hnae3_handle
*nic
= &hdev
->nic
;
530 nic
->ae_algo
= &ae_algovf
;
531 nic
->pdev
= hdev
->pdev
;
532 nic
->numa_node_mask
= hdev
->numa_node_mask
;
533 nic
->flags
|= HNAE3_SUPPORT_VF
;
535 ret
= hclgevf_knic_setup(hdev
);
537 dev_err(&hdev
->pdev
->dev
, "VF knic setup failed %d\n",
542 static void hclgevf_free_vector(struct hclgevf_dev
*hdev
, int vector_id
)
544 if (hdev
->vector_status
[vector_id
] == HCLGEVF_INVALID_VPORT
) {
545 dev_warn(&hdev
->pdev
->dev
,
546 "vector(vector_id %d) has been freed.\n", vector_id
);
550 hdev
->vector_status
[vector_id
] = HCLGEVF_INVALID_VPORT
;
551 hdev
->num_msi_left
+= 1;
552 hdev
->num_msi_used
-= 1;
555 static int hclgevf_get_vector(struct hnae3_handle
*handle
, u16 vector_num
,
556 struct hnae3_vector_info
*vector_info
)
558 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
559 struct hnae3_vector_info
*vector
= vector_info
;
563 vector_num
= min_t(u16
, hdev
->num_nic_msix
- 1, vector_num
);
564 vector_num
= min(hdev
->num_msi_left
, vector_num
);
566 for (j
= 0; j
< vector_num
; j
++) {
567 for (i
= HCLGEVF_MISC_VECTOR_NUM
+ 1; i
< hdev
->num_msi
; i
++) {
568 if (hdev
->vector_status
[i
] == HCLGEVF_INVALID_VPORT
) {
569 vector
->vector
= pci_irq_vector(hdev
->pdev
, i
);
570 vector
->io_addr
= hdev
->hw
.io_base
+
571 HCLGEVF_VECTOR_REG_BASE
+
572 (i
- 1) * HCLGEVF_VECTOR_REG_OFFSET
;
573 hdev
->vector_status
[i
] = 0;
574 hdev
->vector_irq
[i
] = vector
->vector
;
583 hdev
->num_msi_left
-= alloc
;
584 hdev
->num_msi_used
+= alloc
;
589 static int hclgevf_get_vector_index(struct hclgevf_dev
*hdev
, int vector
)
593 for (i
= 0; i
< hdev
->num_msi
; i
++)
594 if (vector
== hdev
->vector_irq
[i
])
600 static int hclgevf_set_rss_algo_key(struct hclgevf_dev
*hdev
,
601 const u8 hfunc
, const u8
*key
)
603 struct hclgevf_rss_config_cmd
*req
;
604 unsigned int key_offset
= 0;
605 struct hclgevf_desc desc
;
610 key_counts
= HCLGEVF_RSS_KEY_SIZE
;
611 req
= (struct hclgevf_rss_config_cmd
*)desc
.data
;
614 hclgevf_cmd_setup_basic_desc(&desc
,
615 HCLGEVF_OPC_RSS_GENERIC_CONFIG
,
618 req
->hash_config
|= (hfunc
& HCLGEVF_RSS_HASH_ALGO_MASK
);
620 (key_offset
<< HCLGEVF_RSS_HASH_KEY_OFFSET_B
);
622 key_size
= min(HCLGEVF_RSS_HASH_KEY_NUM
, key_counts
);
623 memcpy(req
->hash_key
,
624 key
+ key_offset
* HCLGEVF_RSS_HASH_KEY_NUM
, key_size
);
626 key_counts
-= key_size
;
628 ret
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
630 dev_err(&hdev
->pdev
->dev
,
631 "Configure RSS config fail, status = %d\n",
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}
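
/* Write the shadow RSS indirection table to hardware, one command descriptor
 * per HCLGEVF_RSS_CFG_TBL_SIZE entries.
 */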
650 static int hclgevf_set_rss_indir_table(struct hclgevf_dev
*hdev
)
652 const u8
*indir
= hdev
->rss_cfg
.rss_indirection_tbl
;
653 struct hclgevf_rss_indirection_table_cmd
*req
;
654 struct hclgevf_desc desc
;
658 req
= (struct hclgevf_rss_indirection_table_cmd
*)desc
.data
;
660 for (i
= 0; i
< HCLGEVF_RSS_CFG_TBL_NUM
; i
++) {
661 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_RSS_INDIR_TABLE
,
663 req
->start_table_index
= i
* HCLGEVF_RSS_CFG_TBL_SIZE
;
664 req
->rss_set_bitmap
= HCLGEVF_RSS_SET_BITMAP_MSK
;
665 for (j
= 0; j
< HCLGEVF_RSS_CFG_TBL_SIZE
; j
++)
667 indir
[i
* HCLGEVF_RSS_CFG_TBL_SIZE
+ j
];
669 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
671 dev_err(&hdev
->pdev
->dev
,
672 "VF failed(=%d) to set RSS indirection table\n",
681 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev
*hdev
, u16 rss_size
)
683 struct hclgevf_rss_tc_mode_cmd
*req
;
684 u16 tc_offset
[HCLGEVF_MAX_TC_NUM
];
685 u16 tc_valid
[HCLGEVF_MAX_TC_NUM
];
686 u16 tc_size
[HCLGEVF_MAX_TC_NUM
];
687 struct hclgevf_desc desc
;
692 req
= (struct hclgevf_rss_tc_mode_cmd
*)desc
.data
;
694 roundup_size
= roundup_pow_of_two(rss_size
);
695 roundup_size
= ilog2(roundup_size
);
697 for (i
= 0; i
< HCLGEVF_MAX_TC_NUM
; i
++) {
698 tc_valid
[i
] = !!(hdev
->hw_tc_map
& BIT(i
));
699 tc_size
[i
] = roundup_size
;
700 tc_offset
[i
] = rss_size
* i
;
703 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_RSS_TC_MODE
, false);
704 for (i
= 0; i
< HCLGEVF_MAX_TC_NUM
; i
++) {
705 hnae3_set_bit(req
->rss_tc_mode
[i
], HCLGEVF_RSS_TC_VALID_B
,
706 (tc_valid
[i
] & 0x1));
707 hnae3_set_field(req
->rss_tc_mode
[i
], HCLGEVF_RSS_TC_SIZE_M
,
708 HCLGEVF_RSS_TC_SIZE_S
, tc_size
[i
]);
709 hnae3_set_field(req
->rss_tc_mode
[i
], HCLGEVF_RSS_TC_OFFSET_M
,
710 HCLGEVF_RSS_TC_OFFSET_S
, tc_offset
[i
]);
712 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
714 dev_err(&hdev
->pdev
->dev
,
715 "VF failed(=%d) to set rss tc mode\n", status
);
720 /* for revision 0x20, vf shared the same rss config with pf */
721 static int hclgevf_get_rss_hash_key(struct hclgevf_dev
*hdev
)
723 #define HCLGEVF_RSS_MBX_RESP_LEN 8
724 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
725 u8 resp_msg
[HCLGEVF_RSS_MBX_RESP_LEN
];
726 struct hclge_vf_to_pf_msg send_msg
;
727 u16 msg_num
, hash_key_index
;
731 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_RSS_KEY
, 0);
732 msg_num
= (HCLGEVF_RSS_KEY_SIZE
+ HCLGEVF_RSS_MBX_RESP_LEN
- 1) /
733 HCLGEVF_RSS_MBX_RESP_LEN
;
734 for (index
= 0; index
< msg_num
; index
++) {
735 send_msg
.data
[0] = index
;
736 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_msg
,
737 HCLGEVF_RSS_MBX_RESP_LEN
);
739 dev_err(&hdev
->pdev
->dev
,
740 "VF get rss hash key from PF failed, ret=%d",
745 hash_key_index
= HCLGEVF_RSS_MBX_RESP_LEN
* index
;
746 if (index
== msg_num
- 1)
747 memcpy(&rss_cfg
->rss_hash_key
[hash_key_index
],
749 HCLGEVF_RSS_KEY_SIZE
- hash_key_index
);
751 memcpy(&rss_cfg
->rss_hash_key
[hash_key_index
],
752 &resp_msg
[0], HCLGEVF_RSS_MBX_RESP_LEN
);
758 static int hclgevf_get_rss(struct hnae3_handle
*handle
, u32
*indir
, u8
*key
,
761 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
762 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
765 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
766 /* Get hash algorithm */
768 switch (rss_cfg
->hash_algo
) {
769 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ
:
770 *hfunc
= ETH_RSS_HASH_TOP
;
772 case HCLGEVF_RSS_HASH_ALGO_SIMPLE
:
773 *hfunc
= ETH_RSS_HASH_XOR
;
776 *hfunc
= ETH_RSS_HASH_UNKNOWN
;
781 /* Get the RSS Key required by the user */
783 memcpy(key
, rss_cfg
->rss_hash_key
,
784 HCLGEVF_RSS_KEY_SIZE
);
787 *hfunc
= ETH_RSS_HASH_TOP
;
789 ret
= hclgevf_get_rss_hash_key(hdev
);
792 memcpy(key
, rss_cfg
->rss_hash_key
,
793 HCLGEVF_RSS_KEY_SIZE
);
798 for (i
= 0; i
< HCLGEVF_RSS_IND_TBL_SIZE
; i
++)
799 indir
[i
] = rss_cfg
->rss_indirection_tbl
[i
];
804 static int hclgevf_set_rss(struct hnae3_handle
*handle
, const u32
*indir
,
805 const u8
*key
, const u8 hfunc
)
807 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
808 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
811 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
812 /* Set the RSS Hash Key if specififed by the user */
815 case ETH_RSS_HASH_TOP
:
817 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ
;
819 case ETH_RSS_HASH_XOR
:
821 HCLGEVF_RSS_HASH_ALGO_SIMPLE
;
823 case ETH_RSS_HASH_NO_CHANGE
:
829 ret
= hclgevf_set_rss_algo_key(hdev
, rss_cfg
->hash_algo
,
834 /* Update the shadow RSS key with user specified qids */
835 memcpy(rss_cfg
->rss_hash_key
, key
,
836 HCLGEVF_RSS_KEY_SIZE
);
840 /* update the shadow RSS table with user specified qids */
841 for (i
= 0; i
< HCLGEVF_RSS_IND_TBL_SIZE
; i
++)
842 rss_cfg
->rss_indirection_tbl
[i
] = indir
[i
];
844 /* update the hardware */
845 return hclgevf_set_rss_indir_table(hdev
);
848 static u8
hclgevf_get_rss_hash_bits(struct ethtool_rxnfc
*nfc
)
850 u8 hash_sets
= nfc
->data
& RXH_L4_B_0_1
? HCLGEVF_S_PORT_BIT
: 0;
852 if (nfc
->data
& RXH_L4_B_2_3
)
853 hash_sets
|= HCLGEVF_D_PORT_BIT
;
855 hash_sets
&= ~HCLGEVF_D_PORT_BIT
;
857 if (nfc
->data
& RXH_IP_SRC
)
858 hash_sets
|= HCLGEVF_S_IP_BIT
;
860 hash_sets
&= ~HCLGEVF_S_IP_BIT
;
862 if (nfc
->data
& RXH_IP_DST
)
863 hash_sets
|= HCLGEVF_D_IP_BIT
;
865 hash_sets
&= ~HCLGEVF_D_IP_BIT
;
867 if (nfc
->flow_type
== SCTP_V4_FLOW
|| nfc
->flow_type
== SCTP_V6_FLOW
)
868 hash_sets
|= HCLGEVF_V_TAG_BIT
;
873 static int hclgevf_set_rss_tuple(struct hnae3_handle
*handle
,
874 struct ethtool_rxnfc
*nfc
)
876 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
877 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
878 struct hclgevf_rss_input_tuple_cmd
*req
;
879 struct hclgevf_desc desc
;
883 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
887 ~(RXH_IP_SRC
| RXH_IP_DST
| RXH_L4_B_0_1
| RXH_L4_B_2_3
))
890 req
= (struct hclgevf_rss_input_tuple_cmd
*)desc
.data
;
891 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_RSS_INPUT_TUPLE
, false);
893 req
->ipv4_tcp_en
= rss_cfg
->rss_tuple_sets
.ipv4_tcp_en
;
894 req
->ipv4_udp_en
= rss_cfg
->rss_tuple_sets
.ipv4_udp_en
;
895 req
->ipv4_sctp_en
= rss_cfg
->rss_tuple_sets
.ipv4_sctp_en
;
896 req
->ipv4_fragment_en
= rss_cfg
->rss_tuple_sets
.ipv4_fragment_en
;
897 req
->ipv6_tcp_en
= rss_cfg
->rss_tuple_sets
.ipv6_tcp_en
;
898 req
->ipv6_udp_en
= rss_cfg
->rss_tuple_sets
.ipv6_udp_en
;
899 req
->ipv6_sctp_en
= rss_cfg
->rss_tuple_sets
.ipv6_sctp_en
;
900 req
->ipv6_fragment_en
= rss_cfg
->rss_tuple_sets
.ipv6_fragment_en
;
902 tuple_sets
= hclgevf_get_rss_hash_bits(nfc
);
903 switch (nfc
->flow_type
) {
905 req
->ipv4_tcp_en
= tuple_sets
;
908 req
->ipv6_tcp_en
= tuple_sets
;
911 req
->ipv4_udp_en
= tuple_sets
;
914 req
->ipv6_udp_en
= tuple_sets
;
917 req
->ipv4_sctp_en
= tuple_sets
;
920 if (hdev
->ae_dev
->dev_version
<= HNAE3_DEVICE_VERSION_V2
&&
921 (nfc
->data
& (RXH_L4_B_0_1
| RXH_L4_B_2_3
)))
924 req
->ipv6_sctp_en
= tuple_sets
;
927 req
->ipv4_fragment_en
= HCLGEVF_RSS_INPUT_TUPLE_OTHER
;
930 req
->ipv6_fragment_en
= HCLGEVF_RSS_INPUT_TUPLE_OTHER
;
936 ret
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
938 dev_err(&hdev
->pdev
->dev
,
939 "Set rss tuple fail, status = %d\n", ret
);
943 rss_cfg
->rss_tuple_sets
.ipv4_tcp_en
= req
->ipv4_tcp_en
;
944 rss_cfg
->rss_tuple_sets
.ipv4_udp_en
= req
->ipv4_udp_en
;
945 rss_cfg
->rss_tuple_sets
.ipv4_sctp_en
= req
->ipv4_sctp_en
;
946 rss_cfg
->rss_tuple_sets
.ipv4_fragment_en
= req
->ipv4_fragment_en
;
947 rss_cfg
->rss_tuple_sets
.ipv6_tcp_en
= req
->ipv6_tcp_en
;
948 rss_cfg
->rss_tuple_sets
.ipv6_udp_en
= req
->ipv6_udp_en
;
949 rss_cfg
->rss_tuple_sets
.ipv6_sctp_en
= req
->ipv6_sctp_en
;
950 rss_cfg
->rss_tuple_sets
.ipv6_fragment_en
= req
->ipv6_fragment_en
;
954 static int hclgevf_get_rss_tuple(struct hnae3_handle
*handle
,
955 struct ethtool_rxnfc
*nfc
)
957 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
958 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
961 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
966 switch (nfc
->flow_type
) {
968 tuple_sets
= rss_cfg
->rss_tuple_sets
.ipv4_tcp_en
;
971 tuple_sets
= rss_cfg
->rss_tuple_sets
.ipv4_udp_en
;
974 tuple_sets
= rss_cfg
->rss_tuple_sets
.ipv6_tcp_en
;
977 tuple_sets
= rss_cfg
->rss_tuple_sets
.ipv6_udp_en
;
980 tuple_sets
= rss_cfg
->rss_tuple_sets
.ipv4_sctp_en
;
983 tuple_sets
= rss_cfg
->rss_tuple_sets
.ipv6_sctp_en
;
987 tuple_sets
= HCLGEVF_S_IP_BIT
| HCLGEVF_D_IP_BIT
;
996 if (tuple_sets
& HCLGEVF_D_PORT_BIT
)
997 nfc
->data
|= RXH_L4_B_2_3
;
998 if (tuple_sets
& HCLGEVF_S_PORT_BIT
)
999 nfc
->data
|= RXH_L4_B_0_1
;
1000 if (tuple_sets
& HCLGEVF_D_IP_BIT
)
1001 nfc
->data
|= RXH_IP_DST
;
1002 if (tuple_sets
& HCLGEVF_S_IP_BIT
)
1003 nfc
->data
|= RXH_IP_SRC
;
1008 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev
*hdev
,
1009 struct hclgevf_rss_cfg
*rss_cfg
)
1011 struct hclgevf_rss_input_tuple_cmd
*req
;
1012 struct hclgevf_desc desc
;
1015 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_RSS_INPUT_TUPLE
, false);
1017 req
= (struct hclgevf_rss_input_tuple_cmd
*)desc
.data
;
1019 req
->ipv4_tcp_en
= rss_cfg
->rss_tuple_sets
.ipv4_tcp_en
;
1020 req
->ipv4_udp_en
= rss_cfg
->rss_tuple_sets
.ipv4_udp_en
;
1021 req
->ipv4_sctp_en
= rss_cfg
->rss_tuple_sets
.ipv4_sctp_en
;
1022 req
->ipv4_fragment_en
= rss_cfg
->rss_tuple_sets
.ipv4_fragment_en
;
1023 req
->ipv6_tcp_en
= rss_cfg
->rss_tuple_sets
.ipv6_tcp_en
;
1024 req
->ipv6_udp_en
= rss_cfg
->rss_tuple_sets
.ipv6_udp_en
;
1025 req
->ipv6_sctp_en
= rss_cfg
->rss_tuple_sets
.ipv6_sctp_en
;
1026 req
->ipv6_fragment_en
= rss_cfg
->rss_tuple_sets
.ipv6_fragment_en
;
1028 ret
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
1030 dev_err(&hdev
->pdev
->dev
,
1031 "Configure rss input fail, status = %d\n", ret
);
1035 static int hclgevf_get_tc_size(struct hnae3_handle
*handle
)
1037 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1038 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
1040 return rss_cfg
->rss_size
;
1043 static int hclgevf_bind_ring_to_vector(struct hnae3_handle
*handle
, bool en
,
1045 struct hnae3_ring_chain_node
*ring_chain
)
1047 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1048 struct hclge_vf_to_pf_msg send_msg
;
1049 struct hnae3_ring_chain_node
*node
;
1053 memset(&send_msg
, 0, sizeof(send_msg
));
1054 send_msg
.code
= en
? HCLGE_MBX_MAP_RING_TO_VECTOR
:
1055 HCLGE_MBX_UNMAP_RING_TO_VECTOR
;
1056 send_msg
.vector_id
= vector_id
;
1058 for (node
= ring_chain
; node
; node
= node
->next
) {
1059 send_msg
.param
[i
].ring_type
=
1060 hnae3_get_bit(node
->flag
, HNAE3_RING_TYPE_B
);
1062 send_msg
.param
[i
].tqp_index
= node
->tqp_index
;
1063 send_msg
.param
[i
].int_gl_index
=
1064 hnae3_get_field(node
->int_gl_idx
,
1065 HNAE3_RING_GL_IDX_M
,
1066 HNAE3_RING_GL_IDX_S
);
1069 if (i
== HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM
|| !node
->next
) {
1070 send_msg
.ring_num
= i
;
1072 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false,
1075 dev_err(&hdev
->pdev
->dev
,
1076 "Map TQP fail, status is %d.\n",
1087 static int hclgevf_map_ring_to_vector(struct hnae3_handle
*handle
, int vector
,
1088 struct hnae3_ring_chain_node
*ring_chain
)
1090 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1093 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
1094 if (vector_id
< 0) {
1095 dev_err(&handle
->pdev
->dev
,
1096 "Get vector index fail. ret =%d\n", vector_id
);
1100 return hclgevf_bind_ring_to_vector(handle
, true, vector_id
, ring_chain
);
1103 static int hclgevf_unmap_ring_from_vector(
1104 struct hnae3_handle
*handle
,
1106 struct hnae3_ring_chain_node
*ring_chain
)
1108 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1111 if (test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
))
1114 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
1115 if (vector_id
< 0) {
1116 dev_err(&handle
->pdev
->dev
,
1117 "Get vector index fail. ret =%d\n", vector_id
);
1121 ret
= hclgevf_bind_ring_to_vector(handle
, false, vector_id
, ring_chain
);
1123 dev_err(&handle
->pdev
->dev
,
1124 "Unmap ring from vector fail. vector=%d, ret =%d\n",
1131 static int hclgevf_put_vector(struct hnae3_handle
*handle
, int vector
)
1133 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1136 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
1137 if (vector_id
< 0) {
1138 dev_err(&handle
->pdev
->dev
,
1139 "hclgevf_put_vector get vector index fail. ret =%d\n",
1144 hclgevf_free_vector(hdev
, vector_id
);
1149 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev
*hdev
,
1150 bool en_uc_pmc
, bool en_mc_pmc
,
1153 struct hnae3_handle
*handle
= &hdev
->nic
;
1154 struct hclge_vf_to_pf_msg send_msg
;
1157 memset(&send_msg
, 0, sizeof(send_msg
));
1158 send_msg
.code
= HCLGE_MBX_SET_PROMISC_MODE
;
1159 send_msg
.en_bc
= en_bc_pmc
? 1 : 0;
1160 send_msg
.en_uc
= en_uc_pmc
? 1 : 0;
1161 send_msg
.en_mc
= en_mc_pmc
? 1 : 0;
1162 send_msg
.en_limit_promisc
= test_bit(HNAE3_PFLAG_LIMIT_PROMISC
,
1163 &handle
->priv_flags
) ? 1 : 0;
1165 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
1167 dev_err(&hdev
->pdev
->dev
,
1168 "Set promisc mode fail, status is %d.\n", ret
);
1173 static int hclgevf_set_promisc_mode(struct hnae3_handle
*handle
, bool en_uc_pmc
,
1176 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1179 en_bc_pmc
= hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
;
1181 return hclgevf_cmd_set_promisc_mode(hdev
, en_uc_pmc
, en_mc_pmc
,
static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}
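
/* Apply a pending promiscuous mode change from the service task context. */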
1193 static void hclgevf_sync_promisc_mode(struct hclgevf_dev
*hdev
)
1195 struct hnae3_handle
*handle
= &hdev
->nic
;
1196 bool en_uc_pmc
= handle
->netdev_flags
& HNAE3_UPE
;
1197 bool en_mc_pmc
= handle
->netdev_flags
& HNAE3_MPE
;
1200 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED
, &hdev
->state
)) {
1201 ret
= hclgevf_set_promisc_mode(handle
, en_uc_pmc
, en_mc_pmc
);
1203 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED
, &hdev
->state
);
1207 static int hclgevf_tqp_enable(struct hclgevf_dev
*hdev
, unsigned int tqp_id
,
1208 int stream_id
, bool enable
)
1210 struct hclgevf_cfg_com_tqp_queue_cmd
*req
;
1211 struct hclgevf_desc desc
;
1214 req
= (struct hclgevf_cfg_com_tqp_queue_cmd
*)desc
.data
;
1216 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_CFG_COM_TQP_QUEUE
,
1218 req
->tqp_id
= cpu_to_le16(tqp_id
& HCLGEVF_RING_ID_MASK
);
1219 req
->stream_id
= cpu_to_le16(stream_id
);
1221 req
->enable
|= 1U << HCLGEVF_TQP_ENABLE_B
;
1223 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
1225 dev_err(&hdev
->pdev
->dev
,
1226 "TQP enable fail, status =%d.\n", status
);
1231 static void hclgevf_reset_tqp_stats(struct hnae3_handle
*handle
)
1233 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
1234 struct hclgevf_tqp
*tqp
;
1237 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
1238 tqp
= container_of(kinfo
->tqp
[i
], struct hclgevf_tqp
, q
);
1239 memset(&tqp
->tqp_stats
, 0, sizeof(tqp
->tqp_stats
));
1243 static int hclgevf_get_host_mac_addr(struct hclgevf_dev
*hdev
, u8
*p
)
1245 struct hclge_vf_to_pf_msg send_msg
;
1246 u8 host_mac
[ETH_ALEN
];
1249 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_MAC_ADDR
, 0);
1250 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, host_mac
,
1253 dev_err(&hdev
->pdev
->dev
,
1254 "fail to get VF MAC from host %d", status
);
1258 ether_addr_copy(p
, host_mac
);
1263 static void hclgevf_get_mac_addr(struct hnae3_handle
*handle
, u8
*p
)
1265 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1266 u8 host_mac_addr
[ETH_ALEN
];
1268 if (hclgevf_get_host_mac_addr(hdev
, host_mac_addr
))
1271 hdev
->has_pf_mac
= !is_zero_ether_addr(host_mac_addr
);
1272 if (hdev
->has_pf_mac
)
1273 ether_addr_copy(p
, host_mac_addr
);
1275 ether_addr_copy(p
, hdev
->hw
.mac
.mac_addr
);
1278 static int hclgevf_set_mac_addr(struct hnae3_handle
*handle
, void *p
,
1281 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1282 u8
*old_mac_addr
= (u8
*)hdev
->hw
.mac
.mac_addr
;
1283 struct hclge_vf_to_pf_msg send_msg
;
1284 u8
*new_mac_addr
= (u8
*)p
;
1287 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_UNICAST
, 0);
1288 send_msg
.subcode
= HCLGE_MBX_MAC_VLAN_UC_MODIFY
;
1289 ether_addr_copy(send_msg
.data
, new_mac_addr
);
1290 if (is_first
&& !hdev
->has_pf_mac
)
1291 eth_zero_addr(&send_msg
.data
[ETH_ALEN
]);
1293 ether_addr_copy(&send_msg
.data
[ETH_ALEN
], old_mac_addr
);
1294 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1296 ether_addr_copy(hdev
->hw
.mac
.mac_addr
, new_mac_addr
);
1301 static struct hclgevf_mac_addr_node
*
1302 hclgevf_find_mac_node(struct list_head
*list
, const u8
*mac_addr
)
1304 struct hclgevf_mac_addr_node
*mac_node
, *tmp
;
1306 list_for_each_entry_safe(mac_node
, tmp
, list
, node
)
1307 if (ether_addr_equal(mac_addr
, mac_node
->mac_addr
))
1313 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node
*mac_node
,
1314 enum HCLGEVF_MAC_NODE_STATE state
)
1317 /* from set_rx_mode or tmp_add_list */
1318 case HCLGEVF_MAC_TO_ADD
:
1319 if (mac_node
->state
== HCLGEVF_MAC_TO_DEL
)
1320 mac_node
->state
= HCLGEVF_MAC_ACTIVE
;
1322 /* only from set_rx_mode */
1323 case HCLGEVF_MAC_TO_DEL
:
1324 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
) {
1325 list_del(&mac_node
->node
);
1328 mac_node
->state
= HCLGEVF_MAC_TO_DEL
;
1331 /* only from tmp_add_list, the mac_node->state won't be
1332 * HCLGEVF_MAC_ACTIVE
1334 case HCLGEVF_MAC_ACTIVE
:
1335 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
)
1336 mac_node
->state
= HCLGEVF_MAC_ACTIVE
;
1341 static int hclgevf_update_mac_list(struct hnae3_handle
*handle
,
1342 enum HCLGEVF_MAC_NODE_STATE state
,
1343 enum HCLGEVF_MAC_ADDR_TYPE mac_type
,
1344 const unsigned char *addr
)
1346 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1347 struct hclgevf_mac_addr_node
*mac_node
;
1348 struct list_head
*list
;
1350 list
= (mac_type
== HCLGEVF_MAC_ADDR_UC
) ?
1351 &hdev
->mac_table
.uc_mac_list
: &hdev
->mac_table
.mc_mac_list
;
1353 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1355 /* if the mac addr is already in the mac list, no need to add a new
1356 * one into it, just check the mac addr state, convert it to a new
1357 * new state, or just remove it, or do nothing.
1359 mac_node
= hclgevf_find_mac_node(list
, addr
);
1361 hclgevf_update_mac_node(mac_node
, state
);
1362 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1365 /* if this address is never added, unnecessary to delete */
1366 if (state
== HCLGEVF_MAC_TO_DEL
) {
1367 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1371 mac_node
= kzalloc(sizeof(*mac_node
), GFP_ATOMIC
);
1373 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1377 mac_node
->state
= state
;
1378 ether_addr_copy(mac_node
->mac_addr
, addr
);
1379 list_add_tail(&mac_node
->node
, list
);
1381 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1385 static int hclgevf_add_uc_addr(struct hnae3_handle
*handle
,
1386 const unsigned char *addr
)
1388 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_ADD
,
1389 HCLGEVF_MAC_ADDR_UC
, addr
);
1392 static int hclgevf_rm_uc_addr(struct hnae3_handle
*handle
,
1393 const unsigned char *addr
)
1395 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_DEL
,
1396 HCLGEVF_MAC_ADDR_UC
, addr
);
1399 static int hclgevf_add_mc_addr(struct hnae3_handle
*handle
,
1400 const unsigned char *addr
)
1402 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_ADD
,
1403 HCLGEVF_MAC_ADDR_MC
, addr
);
1406 static int hclgevf_rm_mc_addr(struct hnae3_handle
*handle
,
1407 const unsigned char *addr
)
1409 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_DEL
,
1410 HCLGEVF_MAC_ADDR_MC
, addr
);
1413 static int hclgevf_add_del_mac_addr(struct hclgevf_dev
*hdev
,
1414 struct hclgevf_mac_addr_node
*mac_node
,
1415 enum HCLGEVF_MAC_ADDR_TYPE mac_type
)
1417 struct hclge_vf_to_pf_msg send_msg
;
1420 if (mac_type
== HCLGEVF_MAC_ADDR_UC
) {
1421 code
= HCLGE_MBX_SET_UNICAST
;
1422 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
)
1423 subcode
= HCLGE_MBX_MAC_VLAN_UC_ADD
;
1425 subcode
= HCLGE_MBX_MAC_VLAN_UC_REMOVE
;
1427 code
= HCLGE_MBX_SET_MULTICAST
;
1428 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
)
1429 subcode
= HCLGE_MBX_MAC_VLAN_MC_ADD
;
1431 subcode
= HCLGE_MBX_MAC_VLAN_MC_REMOVE
;
1434 hclgevf_build_send_msg(&send_msg
, code
, subcode
);
1435 ether_addr_copy(send_msg
.data
, mac_node
->mac_addr
);
1436 return hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
1439 static void hclgevf_config_mac_list(struct hclgevf_dev
*hdev
,
1440 struct list_head
*list
,
1441 enum HCLGEVF_MAC_ADDR_TYPE mac_type
)
1443 struct hclgevf_mac_addr_node
*mac_node
, *tmp
;
1446 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
1447 ret
= hclgevf_add_del_mac_addr(hdev
, mac_node
, mac_type
);
1449 dev_err(&hdev
->pdev
->dev
,
1450 "failed to configure mac %pM, state = %d, ret = %d\n",
1451 mac_node
->mac_addr
, mac_node
->state
, ret
);
1454 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
) {
1455 mac_node
->state
= HCLGEVF_MAC_ACTIVE
;
1457 list_del(&mac_node
->node
);
1463 static void hclgevf_sync_from_add_list(struct list_head
*add_list
,
1464 struct list_head
*mac_list
)
1466 struct hclgevf_mac_addr_node
*mac_node
, *tmp
, *new_node
;
1468 list_for_each_entry_safe(mac_node
, tmp
, add_list
, node
) {
1469 /* if the mac address from tmp_add_list is not in the
1470 * uc/mc_mac_list, it means have received a TO_DEL request
1471 * during the time window of sending mac config request to PF
1472 * If mac_node state is ACTIVE, then change its state to TO_DEL,
1473 * then it will be removed at next time. If is TO_ADD, it means
1474 * send TO_ADD request failed, so just remove the mac node.
1476 new_node
= hclgevf_find_mac_node(mac_list
, mac_node
->mac_addr
);
1478 hclgevf_update_mac_node(new_node
, mac_node
->state
);
1479 list_del(&mac_node
->node
);
1481 } else if (mac_node
->state
== HCLGEVF_MAC_ACTIVE
) {
1482 mac_node
->state
= HCLGEVF_MAC_TO_DEL
;
1483 list_del(&mac_node
->node
);
1484 list_add_tail(&mac_node
->node
, mac_list
);
1486 list_del(&mac_node
->node
);
1492 static void hclgevf_sync_from_del_list(struct list_head
*del_list
,
1493 struct list_head
*mac_list
)
1495 struct hclgevf_mac_addr_node
*mac_node
, *tmp
, *new_node
;
1497 list_for_each_entry_safe(mac_node
, tmp
, del_list
, node
) {
1498 new_node
= hclgevf_find_mac_node(mac_list
, mac_node
->mac_addr
);
1500 /* If the mac addr is exist in the mac list, it means
1501 * received a new request TO_ADD during the time window
1502 * of sending mac addr configurrequest to PF, so just
1503 * change the mac state to ACTIVE.
1505 new_node
->state
= HCLGEVF_MAC_ACTIVE
;
1506 list_del(&mac_node
->node
);
1509 list_del(&mac_node
->node
);
1510 list_add_tail(&mac_node
->node
, mac_list
);
1515 static void hclgevf_clear_list(struct list_head
*list
)
1517 struct hclgevf_mac_addr_node
*mac_node
, *tmp
;
1519 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
1520 list_del(&mac_node
->node
);
1525 static void hclgevf_sync_mac_list(struct hclgevf_dev
*hdev
,
1526 enum HCLGEVF_MAC_ADDR_TYPE mac_type
)
1528 struct hclgevf_mac_addr_node
*mac_node
, *tmp
, *new_node
;
1529 struct list_head tmp_add_list
, tmp_del_list
;
1530 struct list_head
*list
;
1532 INIT_LIST_HEAD(&tmp_add_list
);
1533 INIT_LIST_HEAD(&tmp_del_list
);
1535 /* move the mac addr to the tmp_add_list and tmp_del_list, then
1536 * we can add/delete these mac addr outside the spin lock
1538 list
= (mac_type
== HCLGEVF_MAC_ADDR_UC
) ?
1539 &hdev
->mac_table
.uc_mac_list
: &hdev
->mac_table
.mc_mac_list
;
1541 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1543 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
1544 switch (mac_node
->state
) {
1545 case HCLGEVF_MAC_TO_DEL
:
1546 list_del(&mac_node
->node
);
1547 list_add_tail(&mac_node
->node
, &tmp_del_list
);
1549 case HCLGEVF_MAC_TO_ADD
:
1550 new_node
= kzalloc(sizeof(*new_node
), GFP_ATOMIC
);
1554 ether_addr_copy(new_node
->mac_addr
, mac_node
->mac_addr
);
1555 new_node
->state
= mac_node
->state
;
1556 list_add_tail(&new_node
->node
, &tmp_add_list
);
1564 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1566 /* delete first, in order to get max mac table space for adding */
1567 hclgevf_config_mac_list(hdev
, &tmp_del_list
, mac_type
);
1568 hclgevf_config_mac_list(hdev
, &tmp_add_list
, mac_type
);
1570 /* if some mac addresses were added/deleted fail, move back to the
1571 * mac_list, and retry at next time.
1573 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1575 hclgevf_sync_from_del_list(&tmp_del_list
, list
);
1576 hclgevf_sync_from_add_list(&tmp_add_list
, list
);
1578 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1581 static void hclgevf_sync_mac_table(struct hclgevf_dev
*hdev
)
1583 hclgevf_sync_mac_list(hdev
, HCLGEVF_MAC_ADDR_UC
);
1584 hclgevf_sync_mac_list(hdev
, HCLGEVF_MAC_ADDR_MC
);
1587 static void hclgevf_uninit_mac_list(struct hclgevf_dev
*hdev
)
1589 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1591 hclgevf_clear_list(&hdev
->mac_table
.uc_mac_list
);
1592 hclgevf_clear_list(&hdev
->mac_table
.mc_mac_list
);
1594 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1597 static int hclgevf_set_vlan_filter(struct hnae3_handle
*handle
,
1598 __be16 proto
, u16 vlan_id
,
1601 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0
1602 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1
1603 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3
1605 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1606 struct hclge_vf_to_pf_msg send_msg
;
1609 if (vlan_id
> HCLGEVF_MAX_VLAN_ID
)
1612 if (proto
!= htons(ETH_P_8021Q
))
1613 return -EPROTONOSUPPORT
;
1615 /* When device is resetting or reset failed, firmware is unable to
1616 * handle mailbox. Just record the vlan id, and remove it after
1619 if ((test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
) ||
1620 test_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
)) && is_kill
) {
1621 set_bit(vlan_id
, hdev
->vlan_del_fail_bmap
);
1625 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
1626 HCLGE_MBX_VLAN_FILTER
);
1627 send_msg
.data
[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET
] = is_kill
;
1628 memcpy(&send_msg
.data
[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET
], &vlan_id
,
1630 memcpy(&send_msg
.data
[HCLGEVF_VLAN_MBX_PROTO_OFFSET
], &proto
,
1632 /* when remove hw vlan filter failed, record the vlan id,
1633 * and try to remove it from hw later, to be consistence
1636 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1638 set_bit(vlan_id
, hdev
->vlan_del_fail_bmap
);
1643 static void hclgevf_sync_vlan_filter(struct hclgevf_dev
*hdev
)
1645 #define HCLGEVF_MAX_SYNC_COUNT 60
1646 struct hnae3_handle
*handle
= &hdev
->nic
;
1647 int ret
, sync_cnt
= 0;
1650 vlan_id
= find_first_bit(hdev
->vlan_del_fail_bmap
, VLAN_N_VID
);
1651 while (vlan_id
!= VLAN_N_VID
) {
1652 ret
= hclgevf_set_vlan_filter(handle
, htons(ETH_P_8021Q
),
1657 clear_bit(vlan_id
, hdev
->vlan_del_fail_bmap
);
1659 if (sync_cnt
>= HCLGEVF_MAX_SYNC_COUNT
)
1662 vlan_id
= find_first_bit(hdev
->vlan_del_fail_bmap
, VLAN_N_VID
);
1666 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle
*handle
, bool enable
)
1668 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1669 struct hclge_vf_to_pf_msg send_msg
;
1671 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
1672 HCLGE_MBX_VLAN_RX_OFF_CFG
);
1673 send_msg
.data
[0] = enable
? 1 : 0;
1674 return hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
1677 static int hclgevf_reset_tqp(struct hnae3_handle
*handle
, u16 queue_id
)
1679 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1680 struct hclge_vf_to_pf_msg send_msg
;
1683 /* disable vf queue before send queue reset msg to PF */
1684 ret
= hclgevf_tqp_enable(hdev
, queue_id
, 0, false);
1688 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_QUEUE_RESET
, 0);
1689 memcpy(send_msg
.data
, &queue_id
, sizeof(queue_id
));
1690 return hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1693 static int hclgevf_set_mtu(struct hnae3_handle
*handle
, int new_mtu
)
1695 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1696 struct hclge_vf_to_pf_msg send_msg
;
1698 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_MTU
, 0);
1699 memcpy(send_msg
.data
, &new_mtu
, sizeof(new_mtu
));
1700 return hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1703 static int hclgevf_notify_client(struct hclgevf_dev
*hdev
,
1704 enum hnae3_reset_notify_type type
)
1706 struct hnae3_client
*client
= hdev
->nic_client
;
1707 struct hnae3_handle
*handle
= &hdev
->nic
;
1710 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED
, &hdev
->state
) ||
1714 if (!client
->ops
->reset_notify
)
1717 ret
= client
->ops
->reset_notify(handle
, type
);
1719 dev_err(&hdev
->pdev
->dev
, "notify nic client failed %d(%d)\n",
1725 static int hclgevf_notify_roce_client(struct hclgevf_dev
*hdev
,
1726 enum hnae3_reset_notify_type type
)
1728 struct hnae3_client
*client
= hdev
->roce_client
;
1729 struct hnae3_handle
*handle
= &hdev
->roce
;
1732 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED
, &hdev
->state
) || !client
)
1735 if (!client
->ops
->reset_notify
)
1738 ret
= client
->ops
->reset_notify(handle
, type
);
1740 dev_err(&hdev
->pdev
->dev
, "notify roce client failed %d(%d)",
1745 static int hclgevf_reset_wait(struct hclgevf_dev
*hdev
)
1747 #define HCLGEVF_RESET_WAIT_US 20000
1748 #define HCLGEVF_RESET_WAIT_CNT 2000
1749 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \
1750 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1755 if (hdev
->reset_type
== HNAE3_VF_RESET
)
1756 ret
= readl_poll_timeout(hdev
->hw
.io_base
+
1757 HCLGEVF_VF_RST_ING
, val
,
1758 !(val
& HCLGEVF_VF_RST_ING_BIT
),
1759 HCLGEVF_RESET_WAIT_US
,
1760 HCLGEVF_RESET_WAIT_TIMEOUT_US
);
1762 ret
= readl_poll_timeout(hdev
->hw
.io_base
+
1763 HCLGEVF_RST_ING
, val
,
1764 !(val
& HCLGEVF_RST_ING_BITS
),
1765 HCLGEVF_RESET_WAIT_US
,
1766 HCLGEVF_RESET_WAIT_TIMEOUT_US
);
1768 /* hardware completion status should be available by this time */
1770 dev_err(&hdev
->pdev
->dev
,
1771 "couldn't get reset done status from h/w, timeout!\n");
1775 /* we will wait a bit more to let reset of the stack to complete. This
1776 * might happen in case reset assertion was made by PF. Yes, this also
1777 * means we might end up waiting bit more even for VF reset.
1784 static void hclgevf_reset_handshake(struct hclgevf_dev
*hdev
, bool enable
)
1788 reg_val
= hclgevf_read_dev(&hdev
->hw
, HCLGEVF_NIC_CSQ_DEPTH_REG
);
1790 reg_val
|= HCLGEVF_NIC_SW_RST_RDY
;
1792 reg_val
&= ~HCLGEVF_NIC_SW_RST_RDY
;
1794 hclgevf_write_dev(&hdev
->hw
, HCLGEVF_NIC_CSQ_DEPTH_REG
,
1798 static int hclgevf_reset_stack(struct hclgevf_dev
*hdev
)
1802 /* uninitialize the nic client */
1803 ret
= hclgevf_notify_client(hdev
, HNAE3_UNINIT_CLIENT
);
1807 /* re-initialize the hclge device */
1808 ret
= hclgevf_reset_hdev(hdev
);
1810 dev_err(&hdev
->pdev
->dev
,
1811 "hclge device re-init failed, VF is disabled!\n");
1815 /* bring up the nic client again */
1816 ret
= hclgevf_notify_client(hdev
, HNAE3_INIT_CLIENT
);
1820 /* clear handshake status with IMP */
1821 hclgevf_reset_handshake(hdev
, false);
1823 /* bring up the nic to enable TX/RX again */
1824 return hclgevf_notify_client(hdev
, HNAE3_UP_CLIENT
);
1827 static int hclgevf_reset_prepare_wait(struct hclgevf_dev
*hdev
)
1829 #define HCLGEVF_RESET_SYNC_TIME 100
1831 if (hdev
->reset_type
== HNAE3_VF_FUNC_RESET
) {
1832 struct hclge_vf_to_pf_msg send_msg
;
1835 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_RESET
, 0);
1836 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1838 dev_err(&hdev
->pdev
->dev
,
1839 "failed to assert VF reset, ret = %d\n", ret
);
1842 hdev
->rst_stats
.vf_func_rst_cnt
++;
1845 set_bit(HCLGEVF_STATE_CMD_DISABLE
, &hdev
->state
);
1846 /* inform hardware that preparatory work is done */
1847 msleep(HCLGEVF_RESET_SYNC_TIME
);
1848 hclgevf_reset_handshake(hdev
, true);
1849 dev_info(&hdev
->pdev
->dev
, "prepare reset(%d) wait done\n",
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
1882 static void hclgevf_reset_err_handle(struct hclgevf_dev
*hdev
)
1884 /* recover handshake status with IMP when reset fail */
1885 hclgevf_reset_handshake(hdev
, true);
1886 hdev
->rst_stats
.rst_fail_cnt
++;
1887 dev_err(&hdev
->pdev
->dev
, "failed to reset VF(%u)\n",
1888 hdev
->rst_stats
.rst_fail_cnt
);
1890 if (hdev
->rst_stats
.rst_fail_cnt
< HCLGEVF_RESET_MAX_FAIL_CNT
)
1891 set_bit(hdev
->reset_type
, &hdev
->reset_pending
);
1893 if (hclgevf_is_reset_pending(hdev
)) {
1894 set_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
);
1895 hclgevf_reset_task_schedule(hdev
);
1897 set_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
);
1898 hclgevf_dump_rst_info(hdev
);
1902 static int hclgevf_reset_prepare(struct hclgevf_dev
*hdev
)
1906 hdev
->rst_stats
.rst_cnt
++;
1908 /* perform reset of the stack & ae device for a client */
1909 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_DOWN_CLIENT
);
1914 /* bring down the nic to stop any ongoing TX/RX */
1915 ret
= hclgevf_notify_client(hdev
, HNAE3_DOWN_CLIENT
);
1920 return hclgevf_reset_prepare_wait(hdev
);
1923 static int hclgevf_reset_rebuild(struct hclgevf_dev
*hdev
)
1927 hdev
->rst_stats
.hw_rst_done_cnt
++;
1928 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_UNINIT_CLIENT
);
1933 /* now, re-initialize the nic client and ae device */
1934 ret
= hclgevf_reset_stack(hdev
);
1937 dev_err(&hdev
->pdev
->dev
, "failed to reset VF stack\n");
1941 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_INIT_CLIENT
);
1942 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
1946 hdev
->rst_stats
.rst_fail_cnt
< HCLGEVF_RESET_MAX_FAIL_CNT
- 1)
1949 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_UP_CLIENT
);
1953 hdev
->last_reset_time
= jiffies
;
1954 hdev
->rst_stats
.rst_done_cnt
++;
1955 hdev
->rst_stats
.rst_fail_cnt
= 0;
1956 clear_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
);
1961 static void hclgevf_reset(struct hclgevf_dev
*hdev
)
1963 if (hclgevf_reset_prepare(hdev
))
1966 /* check if VF could successfully fetch the hardware reset completion
1967 * status from the hardware
1969 if (hclgevf_reset_wait(hdev
)) {
1970 /* can't do much in this situation, will disable VF */
1971 dev_err(&hdev
->pdev
->dev
,
1972 "failed to fetch H/W reset completion status\n");
1976 if (hclgevf_reset_rebuild(hdev
))
1982 hclgevf_reset_err_handle(hdev
);
1985 static enum hnae3_reset_type
hclgevf_get_reset_level(struct hclgevf_dev
*hdev
,
1986 unsigned long *addr
)
1988 enum hnae3_reset_type rst_level
= HNAE3_NONE_RESET
;
1990 /* return the highest priority reset level amongst all */
1991 if (test_bit(HNAE3_VF_RESET
, addr
)) {
1992 rst_level
= HNAE3_VF_RESET
;
1993 clear_bit(HNAE3_VF_RESET
, addr
);
1994 clear_bit(HNAE3_VF_PF_FUNC_RESET
, addr
);
1995 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
1996 } else if (test_bit(HNAE3_VF_FULL_RESET
, addr
)) {
1997 rst_level
= HNAE3_VF_FULL_RESET
;
1998 clear_bit(HNAE3_VF_FULL_RESET
, addr
);
1999 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
2000 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET
, addr
)) {
2001 rst_level
= HNAE3_VF_PF_FUNC_RESET
;
2002 clear_bit(HNAE3_VF_PF_FUNC_RESET
, addr
);
2003 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
2004 } else if (test_bit(HNAE3_VF_FUNC_RESET
, addr
)) {
2005 rst_level
= HNAE3_VF_FUNC_RESET
;
2006 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
2007 } else if (test_bit(HNAE3_FLR_RESET
, addr
)) {
2008 rst_level
= HNAE3_FLR_RESET
;
2009 clear_bit(HNAE3_FLR_RESET
, addr
);
2015 static void hclgevf_reset_event(struct pci_dev
*pdev
,
2016 struct hnae3_handle
*handle
)
2018 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
2019 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2021 dev_info(&hdev
->pdev
->dev
, "received reset request from VF enet\n");
2023 if (hdev
->default_reset_request
)
2025 hclgevf_get_reset_level(hdev
,
2026 &hdev
->default_reset_request
);
2028 hdev
->reset_level
= HNAE3_VF_FUNC_RESET
;
2030 /* reset of this VF requested */
2031 set_bit(HCLGEVF_RESET_REQUESTED
, &hdev
->reset_state
);
2032 hclgevf_reset_task_schedule(hdev
);
2034 hdev
->last_reset_time
= jiffies
;
2037 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev
*ae_dev
,
2038 enum hnae3_reset_type rst_type
)
2040 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2042 set_bit(rst_type
, &hdev
->default_reset_request
);
2045 static void hclgevf_enable_vector(struct hclgevf_misc_vector
*vector
, bool en
)
2047 writel(en
? 1 : 0, vector
->addr
);
2050 static void hclgevf_flr_prepare(struct hnae3_ae_dev
*ae_dev
)
2052 #define HCLGEVF_FLR_RETRY_WAIT_MS 500
2053 #define HCLGEVF_FLR_RETRY_CNT 5
2055 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2060 down(&hdev
->reset_sem
);
2061 set_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
2062 hdev
->reset_type
= HNAE3_FLR_RESET
;
2063 ret
= hclgevf_reset_prepare(hdev
);
2065 dev_err(&hdev
->pdev
->dev
, "fail to prepare FLR, ret=%d\n",
2067 if (hdev
->reset_pending
||
2068 retry_cnt
++ < HCLGEVF_FLR_RETRY_CNT
) {
2069 dev_err(&hdev
->pdev
->dev
,
2070 "reset_pending:0x%lx, retry_cnt:%d\n",
2071 hdev
->reset_pending
, retry_cnt
);
2072 clear_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
2073 up(&hdev
->reset_sem
);
2074 msleep(HCLGEVF_FLR_RETRY_WAIT_MS
);
2079 /* disable misc vector before FLR done */
2080 hclgevf_enable_vector(&hdev
->misc_vector
, false);
2081 hdev
->rst_stats
.flr_rst_cnt
++;
2084 static void hclgevf_flr_done(struct hnae3_ae_dev
*ae_dev
)
2086 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2089 hclgevf_enable_vector(&hdev
->misc_vector
, true);
2091 ret
= hclgevf_reset_rebuild(hdev
);
2093 dev_warn(&hdev
->pdev
->dev
, "fail to rebuild, ret=%d\n",
2096 hdev
->reset_type
= HNAE3_NONE_RESET
;
2097 clear_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
2098 up(&hdev
->reset_sem
);
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}
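
/* Vector 0 is reserved as the misc/mailbox interrupt vector. */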
2108 static void hclgevf_get_misc_vector(struct hclgevf_dev
*hdev
)
2110 struct hclgevf_misc_vector
*vector
= &hdev
->misc_vector
;
2112 vector
->vector_irq
= pci_irq_vector(hdev
->pdev
,
2113 HCLGEVF_MISC_VECTOR_NUM
);
2114 vector
->addr
= hdev
->hw
.io_base
+ HCLGEVF_MISC_VECTOR_REG_BASE
;
2115 /* vector status always valid for Vector 0 */
2116 hdev
->vector_status
[HCLGEVF_MISC_VECTOR_NUM
] = 0;
2117 hdev
->vector_irq
[HCLGEVF_MISC_VECTOR_NUM
] = vector
->vector_irq
;
2119 hdev
->num_msi_left
-= 1;
2120 hdev
->num_msi_used
+= 1;
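
/* Service task scheduling helpers: reset and mailbox events push work onto
 * the shared hclgevf workqueue with zero delay, while the periodic service
 * task is (re)armed with the requested delay unless a reset has failed.
 */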
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 *
		 * if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

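/* Periodic work: runs roughly once per second (round_jiffies_relative(HZ))
 * and uses serv_processed_cnt to derive the slower keep-alive and statistics
 * intervals, so a single timer drives all periodic VF housekeeping.
 */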
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		return;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

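/* Vector 0 is shared by several event sources. The handler below decodes the
 * CMDQ state register and classifies the interrupt as a reset event, a
 * mailbox (CMDQ RX) event, or an unknown source, and also prepares the value
 * used to clear the serviced bit(s).
 */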
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);
	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status, its PF will clear
		 * this status when PF has initialized done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old bit.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

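/* Default RSS setup: Toeplitz hash on older devices, the "simple" hash with
 * the driver's static key on V2 and later, and an indirection table that
 * spreads entries round-robin over rss_size queues
 * (rss_indirection_tbl[i] = i % rss_size).
 */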
static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_tuple_cfg *tuple_sets;
	u32 i;

	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
	tuple_sets = &rss_cfg->rss_tuple_sets;
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_sctp_en =
			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
			HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
			HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclgevf_flush_link_update(hdev);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

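/* Interrupt vector layout: vector 0 is the misc/control vector, the NIC
 * queue vectors follow, and on RoCE capable devices the RoCE vectors start
 * at roce_base_msix_offset (see hclgevf_query_vf_resource()).
 */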
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.rst_cnt;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.rst_cnt) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		return -EBUSY;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MEM_BAR		4

	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw = &hdev->hw;

	/* for device does not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
		return 0;

	hw->mem_base = devm_ioremap_wc(&pdev->dev,
				       pci_resource_start(pdev,
							  HCLGEVF_MEM_BAR),
				       pci_resource_len(pdev, HCLGEVF_MEM_BAR));
	if (!hw->mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	ret = hclgevf_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	return 0;

err_unmap_io_base:
	pci_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.mem_base);

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* nic's msix numbers is always equals to the roce's. */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}

static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_NON_TSO_BD_NUM			8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num =
					HCLGEVF_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
}

static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
				    struct hclgevf_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclgevf_dev_specs_0_cmd *req0;
	struct hclgevf_dev_specs_1_cmd *req1;

	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
					le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
}

static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
}

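/* Device specifications can only be queried from firmware on V3 and newer
 * devices; older ones fall back to the compile-time defaults above, and any
 * field the firmware leaves at zero is backfilled by
 * hclgevf_check_dev_specs().
 */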
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
	struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclgevf_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclgevf_cmd_setup_basic_desc(&desc[i],
					     HCLGEVF_OPC_QUERY_DEV_SPECS, true);
		desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
	}
	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
				     true);

	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclgevf_parse_dev_specs(hdev, desc);
	hclgevf_check_dev_specs(hdev);

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
			       HCLGE_MBX_VPORT_LIST_CLEAR);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

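/* Lightweight re-initialization used while servicing a VF reset: only the
 * PCI/IRQ state, command queue, RSS, GRO and VLAN configuration are redone
 * here, unlike the full probe path in hclgevf_init_hdev().
 */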
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to query dev specifications, ret = %d\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	hclgevf_rss_init_cfg(hdev);
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure vf tbl list as empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_cmd_uninit(hdev);
	hclgevf_pci_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->tc_info.num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->tc_info.num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
}

static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

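/* Register dump for "ethtool -d": each register group (cmdq, common, ring,
 * TQP interrupt) is padded to whole 4-word lines with SEPARATOR_VALUE so the
 * length reported by hclgevf_get_regs_len() matches what hclgevf_get_regs()
 * writes.
 */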
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF registers values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait update port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
	.get_cmdq_stat = hclgevf_get_cmdq_stat,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);