// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclge_comm_rss.h"

#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

static u16 hclge_errno_to_resp(int errno)
{
        int resp = abs(errno);

        /* The status for the pf-to-vf msg cmd is u16, constrained by HW.
         * We need to keep the same type with it.
         * The input errno is a standard error code, so it is safe to
         * store abs(errno) in a u16.
         */
        return resp;
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *                message
 * @resp_msg: response to send back, carrying the status (0 on success) and
 *            any response data for the VF
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
                                struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
                                struct hclge_respond_to_vf_msg *resp_msg)
{
        struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
        struct hclge_dev *hdev = vport->back;
        enum hclge_comm_cmd_status status;
        struct hclge_desc desc;
        u16 resp;

        resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

        if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
                dev_err(&hdev->pdev->dev,
                        "PF fail to gen resp to VF len %u exceeds max len %u\n",
                        resp_msg->len,
                        HCLGE_MBX_MAX_RESP_DATA_SIZE);
                /* If resp_msg->len is too long, set the value to max length
                 * and return the msg to VF
                 */
                resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
        }

        hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

        resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
        resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
        resp_pf_to_vf->match_id = vf_to_pf_req->match_id;

        resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
        resp_pf_to_vf->msg.vf_mbx_msg_code =
                                cpu_to_le16(vf_to_pf_req->msg.code);
        resp_pf_to_vf->msg.vf_mbx_msg_subcode =
                                cpu_to_le16(vf_to_pf_req->msg.subcode);
        resp = hclge_errno_to_resp(resp_msg->status);
        if (resp < SHRT_MAX) {
                resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
        } else {
                dev_warn(&hdev->pdev->dev,
                         "failed to send response to VF, response status %u is out-of-bound\n",
                         resp);
                resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
        }

        if (resp_msg->len > 0)
                memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
                       resp_msg->len);

        trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
                        status, vf_to_pf_req->mbx_src_vfid,
                        vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

        return status;
}

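/* Unlike hclge_gen_resp_to_vf() above, this sends a PF-initiated
 * notification (reset assert, link change, VLAN push, ...) for which no
 * reply from the VF is expected.
 */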
static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
                              u16 mbx_opcode, u8 dest_vfid)
{
        struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
        struct hclge_dev *hdev = vport->back;
        enum hclge_comm_cmd_status status;
        struct hclge_desc desc;

        if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
                dev_err(&hdev->pdev->dev,
                        "msg data length(=%u) exceeds maximum(=%u)\n",
                        msg_len, HCLGE_MBX_MAX_MSG_SIZE);
                return -EMSGSIZE;
        }

        resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

        resp_pf_to_vf->dest_vfid = dest_vfid;
        resp_pf_to_vf->msg_len = msg_len;
        resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);

        memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);

        trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
                        status, dest_vfid, mbx_opcode);

        return status;
}

int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
{
        __le16 msg_data;
        u8 dest_vfid;

        dest_vfid = (u8)vport->vport_id;
        msg_data = cpu_to_le16(reset_type);

        /* send this requested info to VF */
        return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
                                  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

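/* Map the PF-level reset currently in progress onto the reset level the VF
 * has to perform, then notify the VF.
 */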
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        u16 reset_type;

        BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

        if (hdev->reset_type == HNAE3_FUNC_RESET)
                reset_type = HNAE3_VF_PF_FUNC_RESET;
        else if (hdev->reset_type == HNAE3_FLR_RESET)
                reset_type = HNAE3_VF_FULL_RESET;
        else
                reset_type = HNAE3_VF_FUNC_RESET;

        return hclge_inform_vf_reset(vport, reset_type);
}

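/* Free every dynamically allocated node following @head; @head itself is
 * owned by the caller (it usually lives on the caller's stack) and is left
 * untouched.
 */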
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
        struct hnae3_ring_chain_node *chain_tmp, *chain;

        chain = head->next;

        while (chain) {
                chain_tmp = chain->next;
                kfree_sensitive(chain);
                chain = chain_tmp;
        }
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
                        struct hclge_mbx_vf_to_pf_cmd *req,
                        struct hnae3_ring_chain_node *ring_chain,
                        struct hclge_vport *vport)
{
        struct hnae3_ring_chain_node *cur_chain, *new_chain;
        struct hclge_dev *hdev = vport->back;
        int ring_num;
        int i;

        ring_num = req->msg.ring_num;

        if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
                return -EINVAL;

        for (i = 0; i < ring_num; i++) {
                if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
                        dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
                                req->msg.param[i].tqp_index,
                                vport->nic.kinfo.rss_size - 1U);
                        return -EINVAL;
                }
        }

        hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
                      req->msg.param[0].ring_type);
        ring_chain->tqp_index =
                hclge_get_queue_id(vport->nic.kinfo.tqp
                                   [req->msg.param[0].tqp_index]);
        hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                        HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

        cur_chain = ring_chain;

        for (i = 1; i < ring_num; i++) {
                new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
                if (!new_chain)
                        goto err;

                hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
                              req->msg.param[i].ring_type);
                new_chain->tqp_index =
                        hclge_get_queue_id(vport->nic.kinfo.tqp
                                           [req->msg.param[i].tqp_index]);
                hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                                HNAE3_RING_GL_IDX_S,
                                req->msg.param[i].int_gl_index);

                cur_chain->next = new_chain;
                cur_chain = new_chain;
        }

        return 0;
err:
        hclge_free_vector_ring_chain(ring_chain);
        return -ENOMEM;
}

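/* Bind (en == true) or unbind (en == false) the rings described in the VF's
 * mailbox request to the interrupt vector chosen by the VF.
 */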
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
                                             struct hclge_mbx_vf_to_pf_cmd *req)
{
        struct hnae3_ring_chain_node ring_chain;
        int vector_id = req->msg.vector_id;
        int ret;

        memset(&ring_chain, 0, sizeof(ring_chain));
        ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
        if (ret)
                return ret;

        ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

        hclge_free_vector_ring_chain(&ring_chain);

        return ret;
}

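/* Query firmware for the vector the first ring of @ring_chain is mapped to.
 * Note the descriptor is set up with the "read" flag (last argument of
 * hclge_cmd_setup_basic_desc()), so HCLGE_OPC_ADD_RING_TO_VECTOR acts as a
 * query here rather than an update.
 */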
static int hclge_query_ring_vector_map(struct hclge_vport *vport,
                                       struct hnae3_ring_chain_node *ring_chain,
                                       struct hclge_desc *desc)
{
        struct hclge_ctrl_vector_chain_cmd *req =
                (struct hclge_ctrl_vector_chain_cmd *)desc->data;
        struct hclge_dev *hdev = vport->back;
        enum hclge_comm_cmd_status status;
        u16 tqp_type_and_id;

        hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);

        tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
        hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
                        hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
        hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
                        ring_chain->tqp_index);
        req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
        req->vfid = vport->vport_id;

        status = hclge_cmd_send(&hdev->hw, desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Get VF ring vector map info fail, status is %d.\n",
                        status);

        return status;
}

static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
                                        struct hclge_mbx_vf_to_pf_cmd *req,
                                        struct hclge_respond_to_vf_msg *resp)
{
#define HCLGE_LIMIT_RING_NUM                    1
#define HCLGE_RING_TYPE_OFFSET                  0
#define HCLGE_TQP_INDEX_OFFSET                  1
#define HCLGE_INT_GL_INDEX_OFFSET               2
#define HCLGE_VECTOR_ID_OFFSET                  3
#define HCLGE_RING_VECTOR_MAP_INFO_LEN          4
        struct hnae3_ring_chain_node ring_chain;
        struct hclge_desc desc;
        struct hclge_ctrl_vector_chain_cmd *data =
                (struct hclge_ctrl_vector_chain_cmd *)desc.data;
        u16 tqp_type_and_id;
        u8 int_gl_index;
        int ret;

        req->msg.ring_num = HCLGE_LIMIT_RING_NUM;

        memset(&ring_chain, 0, sizeof(ring_chain));
        ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
        if (ret)
                return ret;

        ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
        if (ret) {
                hclge_free_vector_ring_chain(&ring_chain);
                return ret;
        }

        tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
        int_gl_index = hnae3_get_field(tqp_type_and_id,
                                       HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);

        resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
        resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
        resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
        resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
        resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;

        hclge_free_vector_ring_chain(&ring_chain);

        return ret;
}

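/* Only record the VF's requested promiscuous configuration here; applying
 * it to hardware is deferred to the periodic service task scheduled via
 * hclge_task_schedule().
 */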
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
                                      struct hclge_mbx_vf_to_pf_cmd *req)
{
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;

        vport->vf_info.request_uc_en = req->msg.en_uc;
        vport->vf_info.request_mc_en = req->msg.en_mc;
        vport->vf_info.request_bc_en = req->msg.en_bc;

        if (req->msg.en_limit_promisc)
                set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
        else
                clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
                          &handle->priv_flags);

        set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
        hclge_task_schedule(hdev, 0);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET        6

        const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
                const u8 *old_addr = (const u8 *)
                        (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

                /* If VF MAC has been configured by the host then it
                 * cannot be overridden by the MAC specified by the VM.
                 */
                if (!is_zero_ether_addr(vport->vf_info.mac) &&
                    !ether_addr_equal(mac_addr, vport->vf_info.mac))
                        return -EPERM;

                if (!is_valid_ether_addr(mac_addr))
                        return -EINVAL;

                spin_lock_bh(&vport->mac_list_lock);
                status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
                                                            mac_addr);
                spin_unlock_bh(&vport->mac_list_lock);
                hclge_task_schedule(hdev, 0);
        } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
                status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
                                               HCLGE_MAC_ADDR_UC, mac_addr);
        } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
                status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
                                               HCLGE_MAC_ADDR_UC, mac_addr);
        } else {
                dev_err(&hdev->pdev->dev,
                        "failed to set unicast mac addr, unknown subcode %u\n",
                        mbx_req->msg.subcode);
                return -EIO;
        }

        return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
        struct hclge_dev *hdev = vport->back;

        if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
                hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
                                      HCLGE_MAC_ADDR_MC, mac_addr);
        } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
                hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
                                      HCLGE_MAC_ADDR_MC, mac_addr);
        } else {
                dev_err(&hdev->pdev->dev,
                        "failed to set mcast mac addr, unknown subcode %u\n",
                        mbx_req->msg.subcode);
                return -EIO;
        }

        return 0;
}

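/* Push the port based VLAN state (state, protocol, QoS and tag) to the VF
 * so it can adjust its own VLAN handling accordingly.
 */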
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
                                      u16 state,
                                      struct hclge_vlan_info *vlan_info)
{
        struct hclge_mbx_port_base_vlan base_vlan;

        base_vlan.state = cpu_to_le16(state);
        base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
        base_vlan.qos = cpu_to_le16(vlan_info->qos);
        base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);

        return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
                                  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
                                 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET     0
#define HCLGE_MBX_VLAN_INFO_OFFSET      2

        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        struct hclge_vf_vlan_cfg *msg_cmd;
        __be16 proto;
        u16 vlan_id;

        msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
        switch (msg_cmd->subcode) {
        case HCLGE_MBX_VLAN_FILTER:
                proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
                vlan_id = le16_to_cpu(msg_cmd->vlan);
                return hclge_set_vlan_filter(handle, proto, vlan_id,
                                             msg_cmd->is_kill);
        case HCLGE_MBX_VLAN_RX_OFF_CFG:
                return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
        case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
                /* vf does not need to know about the port based VLAN state
                 * on device HNAE3_DEVICE_VERSION_V3. So always return disable
                 * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port
                 * based VLAN state.
                 */
                resp_msg->data[0] =
                        hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
                        HNAE3_PORT_BASE_VLAN_DISABLE :
                        vport->port_base_vlan_cfg.state;
                resp_msg->len = sizeof(u8);
                return 0;
        case HCLGE_MBX_ENABLE_VLAN_FILTER:
                return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
        default:
                return 0;
        }
}

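/* The VF reports whether its driver is coming up (data[0] != 0) or going
 * down; start or stop the corresponding vport to match.
 */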
static int hclge_set_vf_alive(struct hclge_vport *vport,
                              struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        bool alive = !!mbx_req->msg.data[0];
        int ret = 0;

        if (alive)
                ret = hclge_vport_start(vport);
        else
                hclge_vport_stop(vport);

        return ret;
}

static void hclge_get_basic_info(struct hclge_vport *vport,
                                 struct hclge_respond_to_vf_msg *resp_msg)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
        struct hclge_basic_info *basic_info;
        unsigned int i;
        u32 pf_caps;

        basic_info = (struct hclge_basic_info *)resp_msg->data;
        for (i = 0; i < kinfo->tc_info.num_tc; i++)
                basic_info->hw_tc_map |= BIT(i);

        pf_caps = le32_to_cpu(basic_info->pf_caps);
        if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
                hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);

        basic_info->pf_caps = cpu_to_le32(pf_caps);
        resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN         6

        struct hclge_mbx_vf_queue_info *queue_info;
        struct hclge_dev *hdev = vport->back;

        /* get the queue related info */
        queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
        queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
        queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
        queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
        resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
                                  struct hclge_respond_to_vf_msg *resp_msg)
{
        ether_addr_copy(resp_msg->data, vport->vf_info.mac);
        resp_msg->len = ETH_ALEN;
}

static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
                                     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN       4

        struct hclge_mbx_vf_queue_depth *queue_depth;
        struct hclge_dev *hdev = vport->back;

        /* get the queue depth info */
        queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
        queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
        queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);

        resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}

static void hclge_get_vf_media_type(struct hclge_vport *vport,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET      0
#define HCLGE_VF_MODULE_TYPE_OFFSET     1
#define HCLGE_VF_MEDIA_TYPE_LENGTH      2

        struct hclge_dev *hdev = vport->back;

        resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
                hdev->hw.mac.media_type;
        resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
                hdev->hw.mac.module_type;
        resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP          1U
#define HCLGE_VF_LINK_STATE_DOWN        0U

        struct hclge_mbx_link_status link_info;
        struct hclge_dev *hdev = vport->back;
        u16 link_status;

        /* mac.link can only be 0 or 1 */
        switch (vport->vf_info.link_state) {
        case IFLA_VF_LINK_STATE_ENABLE:
                link_status = HCLGE_VF_LINK_STATE_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                link_status = HCLGE_VF_LINK_STATE_DOWN;
                break;
        case IFLA_VF_LINK_STATE_AUTO:
        default:
                link_status = (u16)hdev->hw.mac.link;
                break;
        }

        link_info.link_status = cpu_to_le16(link_status);
        link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
        link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
        link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;

        /* send this requested info to VF */
        return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
                                  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
                                struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED   1
        struct hclge_mbx_link_mode link_mode;
        struct hclge_dev *hdev = vport->back;
        unsigned long advertising;
        unsigned long supported;
        unsigned long send_data;
        u8 dest_vfid;

        advertising = hdev->hw.mac.advertising[0];
        supported = hdev->hw.mac.supported[0];
        dest_vfid = mbx_req->mbx_src_vfid;
        send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
                                                              advertising;
        link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
        link_mode.link_mode = cpu_to_le64(send_data);

        hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
                           HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE      1U
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        u16 queue_id;
        int ret;

        queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
        resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
        resp_msg->len = sizeof(u8);

        /* pf will reset vf's all queues at a time. So it is unnecessary
         * to reset queues if queue_id > 0, just return success.
         */
        if (queue_id > 0)
                return 0;

        ret = hclge_reset_tqp(handle);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
                        vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);

        return ret;
}

static int hclge_reset_vf(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;

        dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
                 vport->vport_id - HCLGE_VF_VPORT_START_NUM);

        return hclge_func_reset_cmd(hdev, vport->vport_id);
}

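/* Re-sync state to a VF that has just become alive: push the current link
 * status, and deliver any pending reset or port based VLAN notification for
 * this vport.
 */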
static void hclge_notify_vf_config(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
        struct hclge_port_base_vlan_config *vlan_cfg;
        int ret;

        hclge_push_vf_link_status(vport);
        if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
                ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "failed to inform VF %u reset!",
                                vport->vport_id - HCLGE_VF_VPORT_START_NUM);
                        return;
                }
                vport->need_notify = 0;
                return;
        }

        if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
            test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
                vlan_cfg = &vport->port_base_vlan_cfg;
                ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
                                                        vport->vport_id,
                                                        vlan_cfg->state,
                                                        &vlan_cfg->vlan_info);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "failed to inform VF %u port base vlan!",
                                vport->vport_id - HCLGE_VF_VPORT_START_NUM);
                        return;
                }
                clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
        }
}

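/* Refresh the VF's keep-alive timestamp. On the first keep-alive after the
 * vport is initialized, mark the VF alive and push the current configuration
 * to it.
 */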
static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;

        vport->last_active_jiffies = jiffies;

        if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
            !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
                set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
                dev_info(&hdev->pdev->dev, "VF %u is alive!",
                         vport->vport_id - HCLGE_VF_VPORT_START_NUM);
                hclge_notify_vf_config(vport);
        }
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
                            struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        struct hclge_mbx_mtu_info *mtu_info;
        u32 mtu;

        mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
        mtu = le32_to_cpu(mtu_info->mtu);

        return hclge_set_vport_mtu(vport, mtu);
}

static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        u16 queue_id, qid_in_pf;

        queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
        if (queue_id >= handle->kinfo.num_tqps) {
                dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
                        queue_id, mbx_req->mbx_src_vfid);
                return -EINVAL;
        }

        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
        resp_msg->len = sizeof(qid_in_pf);
        return 0;
}

static int hclge_get_rss_key(struct hclge_vport *vport,
                             struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                             struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN  8
        struct hclge_dev *hdev = vport->back;
        struct hclge_comm_rss_cfg *rss_cfg;
        u8 index;

        index = mbx_req->msg.data[0];
        rss_cfg = &hdev->rss_cfg;

        /* Check the query index of rss_hash_key from VF, make sure no
         * more than the size of rss_hash_key.
         */
        if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
            sizeof(rss_cfg->rss_hash_key)) {
                dev_warn(&hdev->pdev->dev,
                         "failed to get the rss hash key, the index(%u) invalid !\n",
                         index);
                return -EINVAL;
        }

        memcpy(resp_msg->data,
               &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
               HCLGE_RSS_MBX_RESP_LEN);
        resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
        return 0;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
        switch (link_fail_code) {
        case HCLGE_LF_REF_CLOCK_LOST:
                dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
                break;
        case HCLGE_LF_XSFP_TX_DISABLE:
                dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
                break;
        case HCLGE_LF_XSFP_ABSENT:
                dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
                break;
        default:
                break;
        }
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
                                           struct hclge_mbx_vf_to_pf_cmd *req)
{
        hclge_task_schedule(hdev, 0);

        if (!req->msg.subcode)
                hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
        u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);

        return tail == hw->hw.cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
        struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

        ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
        dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
        ae_dev->ops->reset_event(hdev->pdev, NULL);
}

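/* Handle HCLGE_MBX_HANDLE_VF_TBL requests; only the VPORT_LIST_CLEAR subcode
 * is supported, which drops all MAC and VLAN table entries added on behalf
 * of this vport.
 */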
static void hclge_handle_vf_tbl(struct hclge_vport *vport,
                                struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        struct hclge_dev *hdev = vport->back;
        struct hclge_vf_vlan_cfg *msg_cmd;

        msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
        if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
                hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
                hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
                hclge_rm_vport_all_vlan_table(vport, true);
        } else {
                dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
                         msg_cmd->subcode);
        }
}

static int
hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
{
        return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
                                                 param->req);
}

static int
hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
{
        return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
                                                 param->req);
}

static int
hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
                                           param->resp_msg);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "PF fail(%d) to get VF ring vector map\n",
                        ret);
        return ret;
}

static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
{
        hclge_set_vf_promisc_mode(param->vport, param->req);
        return 0;
}

static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "PF fail(%d) to set VF UC MAC Addr\n",
                        ret);
        return ret;
}

static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "PF fail(%d) to set VF MC MAC Addr\n",
                        ret);
        return ret;
}

static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "PF failed(%d) to config VF's VLAN\n",
                        ret);
        return ret;
}

static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_set_vf_alive(param->vport, param->req);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "PF failed(%d) to set VF's ALIVE\n",
                        ret);
        return ret;
}

static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
{
        hclge_get_vf_queue_info(param->vport, param->resp_msg);
        return 0;
}

static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
{
        hclge_get_vf_queue_depth(param->vport, param->resp_msg);
        return 0;
}

static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
{
        hclge_get_basic_info(param->vport, param->resp_msg);
        return 0;
}

static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_push_vf_link_status(param->vport);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "failed to inform link stat to VF, ret = %d\n",
                        ret);
        return ret;
}

static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
{
        return hclge_mbx_reset_vf_queue(param->vport, param->req,
                                        param->resp_msg);
}

static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
{
        return hclge_reset_vf(param->vport);
}

static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
{
        hclge_vf_keep_alive(param->vport);
        return 0;
}

static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
{
        int ret;

        ret = hclge_set_vf_mtu(param->vport, param->req);
        if (ret)
                dev_err(&param->vport->back->pdev->dev,
                        "VF fail(%d) to set mtu\n", ret);
        return ret;
}

static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
{
        return hclge_get_queue_id_in_pf(param->vport, param->req,
                                        param->resp_msg);
}

static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
{
        return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
}

static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
{
        hclge_get_link_mode(param->vport, param->req);
        return 0;
}

static int
hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
{
        hclge_rm_vport_all_mac_table(param->vport, false,
                                     HCLGE_MAC_ADDR_UC);
        hclge_rm_vport_all_mac_table(param->vport, false,
                                     HCLGE_MAC_ADDR_MC);
        hclge_rm_vport_all_vlan_table(param->vport, false);
        return 0;
}

static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
{
        hclge_rm_vport_all_mac_table(param->vport, true,
                                     HCLGE_MAC_ADDR_UC);
        hclge_rm_vport_all_mac_table(param->vport, true,
                                     HCLGE_MAC_ADDR_MC);
        hclge_rm_vport_all_vlan_table(param->vport, true);
        param->vport->mps = 0;
        return 0;
}

static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
{
        hclge_get_vf_media_type(param->vport, param->resp_msg);
        return 0;
}

static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
{
        hclge_handle_link_change_event(param->vport->back, param->req);
        return 0;
}

static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
{
        hclge_get_vf_mac_addr(param->vport, param->resp_msg);
        return 0;
}

static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
{
        hclge_handle_ncsi_error(param->vport->back);
        return 0;
}

static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
{
        hclge_handle_vf_tbl(param->vport, param->req);
        return 0;
}

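/* Dispatch table indexed by mailbox opcode; a NULL entry means the opcode is
 * unsupported and is reported by hclge_mbx_request_handling() below.
 */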
static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
        [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
        [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
        [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
        [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
        [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
        [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
        [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
        [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
        [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
        [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
        [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
        [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
        [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
        [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
        [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
        [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
        [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
        [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
        [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
        [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
        [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
        [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
        [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
        [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
        [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
        [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
};

static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
{
        hclge_mbx_ops_fn cmd_func = NULL;
        struct hclge_dev *hdev;
        int ret = 0;

        hdev = param->vport->back;
        cmd_func = hclge_mbx_ops_list[param->req->msg.code];
        if (!cmd_func) {
                dev_err(&hdev->pdev->dev,
                        "un-supported mailbox message, code = %u\n",
                        param->req->msg.code);
                return;
        }
        ret = cmd_func(param);

        /* PF driver should not reply IMP */
        if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
            param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
                param->resp_msg->status = ret;
                if (time_is_before_jiffies(hdev->last_mbx_scheduled +
                                           HCLGE_MBX_SCHED_TIMEOUT))
                        dev_warn(&hdev->pdev->dev,
                                 "resp vport%u mbx(%u,%u) late\n",
                                 param->req->mbx_src_vfid,
                                 param->req->msg.code,
                                 param->req->msg.subcode);

                hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
        }
}

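/* hclge_mbx_handler: main entry for VF-to-PF mailbox processing. Drain the
 * command receive queue (CRQ), validate and dispatch each request, then write
 * the CRQ head pointer back so firmware can reuse the descriptors.
 */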
void hclge_mbx_handler(struct hclge_dev *hdev)
{
        struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
        struct hclge_respond_to_vf_msg resp_msg;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclge_mbx_ops_param param;
        struct hclge_desc *desc;
        u16 flag;

        param.resp_msg = &resp_msg;
        /* handle all the mailbox requests in the queue */
        while (!hclge_cmd_crq_empty(&hdev->hw)) {
                if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
                             &hdev->hw.hw.comm_state)) {
                        dev_warn(&hdev->pdev->dev,
                                 "command queue needs re-initializing\n");
                        return;
                }

                desc = &crq->desc[crq->next_to_use];
                req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
                             req->mbx_src_vfid > hdev->num_req_vfs)) {
                        dev_warn(&hdev->pdev->dev,
                                 "dropped invalid mailbox message, code = %u, vfid = %u\n",
                                 req->msg.code, req->mbx_src_vfid);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hclge_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                trace_hclge_pf_mbx_get(hdev, req);

                /* clear the resp_msg before processing every mailbox message */
                memset(&resp_msg, 0, sizeof(resp_msg));
                param.vport = &hdev->vport[req->mbx_src_vfid];
                param.req = req;
                hclge_mbx_request_handling(&param);

                crq->desc[crq->next_to_use].flag = 0;
                hclge_mbx_ring_ptr_move_crq(crq);
        }

        /* Write back CMDQ_RQ header pointer, M7 need this pointer */
        hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
                        crq->next_to_use);
}