drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
// SPDX-License-Identifier: GPL-2.0-only
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "hinic_hw_if.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_dev.h"
#include "hinic_hw_mbox.h"
#define HINIC_MBOX_INT_DST_FUNC_SHIFT		0
#define HINIC_MBOX_INT_DST_AEQN_SHIFT		10
#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT	12
#define HINIC_MBOX_INT_STAT_DMA_SHIFT		14
/* The size of the data to be sent (in units of 4 bytes) */
#define HINIC_MBOX_INT_TX_SIZE_SHIFT		20
/* SO_RO (strong order, relax order) */
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT	25
#define HINIC_MBOX_INT_WB_EN_SHIFT		28
#define HINIC_MBOX_INT_DST_FUNC_MASK		0x3FF
#define HINIC_MBOX_INT_DST_AEQN_MASK		0x3
#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK	0x3
#define HINIC_MBOX_INT_STAT_DMA_MASK		0x3F
#define HINIC_MBOX_INT_TX_SIZE_MASK		0x1F
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK	0x3
#define HINIC_MBOX_INT_WB_EN_MASK		0x1

#define HINIC_MBOX_INT_SET(val, field)	\
	(((val) & HINIC_MBOX_INT_##field##_MASK) << \
	 HINIC_MBOX_INT_##field##_SHIFT)
enum hinic_mbox_tx_status {
	TX_NOT_DONE = 1,
};

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT	0
/* specifies the issue request for the message data.
 * 0 - Tx request is done;
 * 1 - Tx request is in process.
 */
#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT		1

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK	0x1
#define HINIC_MBOX_CTRL_TX_STATUS_MASK		0x1

#define HINIC_MBOX_CTRL_SET(val, field)	\
	(((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
	 HINIC_MBOX_CTRL_##field##_SHIFT)
#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT		0
#define HINIC_MBOX_HEADER_MODULE_SHIFT		11
#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT		16
#define HINIC_MBOX_HEADER_NO_ACK_SHIFT		22
#define HINIC_MBOX_HEADER_SEQID_SHIFT		24
#define HINIC_MBOX_HEADER_LAST_SHIFT		30
/* specifies the mailbox message direction
 * 0 - send
 * 1 - receive
 */
#define HINIC_MBOX_HEADER_DIRECTION_SHIFT	31
#define HINIC_MBOX_HEADER_CMD_SHIFT		32
#define HINIC_MBOX_HEADER_MSG_ID_SHIFT		40
#define HINIC_MBOX_HEADER_STATUS_SHIFT		48
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT	54
#define HINIC_MBOX_HEADER_MSG_LEN_MASK		0x7FF
#define HINIC_MBOX_HEADER_MODULE_MASK		0x1F
#define HINIC_MBOX_HEADER_SEG_LEN_MASK		0x3F
#define HINIC_MBOX_HEADER_NO_ACK_MASK		0x1
#define HINIC_MBOX_HEADER_SEQID_MASK		0x3F
#define HINIC_MBOX_HEADER_LAST_MASK		0x1
#define HINIC_MBOX_HEADER_DIRECTION_MASK	0x1
#define HINIC_MBOX_HEADER_CMD_MASK		0xFF
#define HINIC_MBOX_HEADER_MSG_ID_MASK		0xFF
#define HINIC_MBOX_HEADER_STATUS_MASK		0x3F
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK	0x3FF

#define HINIC_MBOX_HEADER_GET(val, field)	\
	(((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
	 HINIC_MBOX_HEADER_##field##_MASK)
#define HINIC_MBOX_HEADER_SET(val, field)	\
	((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
	 HINIC_MBOX_HEADER_##field##_SHIFT)

#define MBOX_SEGLEN_MASK	\
	HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
#define HINIC_MBOX_SEG_LEN		48
#define HINIC_MBOX_COMP_TIME		8000U
#define MBOX_MSG_POLLING_TIMEOUT	8000

#define HINIC_MBOX_DATA_SIZE		2040

#define MBOX_MAX_BUF_SZ			2048UL
#define MBOX_HEADER_SZ			8

#define MBOX_INFO_SZ			4

/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
#define MBOX_SEG_LEN			48
#define MBOX_SEG_LEN_ALIGN		4
#define MBOX_WB_STATUS_LEN		16UL
/* mbox write back status is 16B, only first 4B is used */
#define MBOX_WB_STATUS_ERRCODE_MASK		0xFFFF
#define MBOX_WB_STATUS_MASK			0xFF
#define MBOX_WB_ERROR_CODE_MASK			0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS		0xFF
#define MBOX_WB_STATUS_FINISHED_WITH_ERR	0xFE
#define MBOX_WB_STATUS_NOT_FINISHED		0x00

#define MBOX_STATUS_FINISHED(wb)	\
	(((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
#define MBOX_STATUS_SUCCESS(wb)		\
	(((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
#define MBOX_STATUS_ERRCODE(wb)		\
	((wb) & MBOX_WB_ERROR_CODE_MASK)
#define SEQ_ID_START_VAL		0
#define SEQ_ID_MAX_VAL			42

#define DST_AEQ_IDX_DEFAULT_VAL		0
#define SRC_AEQ_IDX_DEFAULT_VAL		0
#define NO_DMA_ATTRIBUTE_VAL		0

#define HINIC_MGMT_RSP_AEQN		0
#define HINIC_MBOX_RSP_AEQN		2
#define HINIC_MBOX_RECV_AEQN		0

#define MBOX_MSG_NO_DATA_LEN		1

#define MBOX_BODY_FROM_HDR(header)	((u8 *)(header) + MBOX_HEADER_SZ)
#define MBOX_AREA(hwif)			\
	((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)

#define IS_PF_OR_PPF_SRC(src_func_idx)	((src_func_idx) < HINIC_MAX_PF_FUNCS)

#define MBOX_RESPONSE_ERROR		0x1
#define MBOX_MSG_ID_MASK		0xFF
#define MBOX_MSG_ID(func_to_func)	((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox)	(MBOX_MSG_ID(func_to_func_mbox) = \
			(MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
#define FUNC_ID_OFF_SET_8B		8

/* max number of messages waiting to be processed for one function */
#define HINIC_MAX_MSG_CNT_TO_PROCESS	10

#define HINIC_QUEUE_MIN_DEPTH		6
#define HINIC_QUEUE_MAX_DEPTH		12
#define HINIC_MAX_RX_BUFFER_SIZE	15
enum hinic_hwif_direction_type {
	HINIC_HWIF_DIRECT_SEND = 0,
	HINIC_HWIF_RESPONSE = 1,
};

enum mbox_send_mod {
	MBOX_SEND_MSG_INT,
};

enum mbox_seg_type {
	NOT_LAST_SEG,
	LAST_SEG,
};

enum mbox_ordering_type {
	STRONG_ORDER,
};

enum mbox_write_back_type {
	WRITE_BACK = 1,
};

enum mbox_aeq_trig_type {
	NOT_TRIGGER,
	TRIGGER,
};
static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
			  const void *buf_in, u16 in_size, u16 offset)
{
	u16 func_idx;

	if (in_size < offset + sizeof(func_idx)) {
		dev_warn(&hwdev->hwif->pdev->dev,
			 "Receive mailbox msg len: %d less than %d Bytes is invalid\n",
			 in_size, offset);
		return false;
	}

	func_idx = *((u16 *)((u8 *)buf_in + offset));

	if (src_func_idx != func_idx) {
		dev_warn(&hwdev->hwif->pdev->dev,
			 "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n",
			 src_func_idx, func_idx);
		return false;
	}

	return true;
}
bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
				 void *buf_in, u16 in_size)
{
	return check_func_id(hwdev, func_idx, buf_in, in_size,
			     FUNC_ID_OFF_SET_8B);
}
/**
 * hinic_register_pf_mbox_cb - register mbox callback for pf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 * @callback: callback function
 * Return: 0 - success, negative - failure
 */
int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
			      enum hinic_mod_type mod,
			      hinic_pf_mbox_cb callback)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	if (mod >= HINIC_MOD_MAX)
		return -EFAULT;

	func_to_func->pf_mbox_cb[mod] = callback;

	set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);

	return 0;
}
/**
 * hinic_register_vf_mbox_cb - register mbox callback for vf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 * @callback: callback function
 * Return: 0 - success, negative - failure
 */
int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
			      enum hinic_mod_type mod,
			      hinic_vf_mbox_cb callback)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	if (mod >= HINIC_MOD_MAX)
		return -EFAULT;

	func_to_func->vf_mbox_cb[mod] = callback;

	set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);

	return 0;
}
/**
 * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 */
void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
				 enum hinic_mod_type mod)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);

	while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
			&func_to_func->pf_mbox_cb_state[mod]))
		usleep_range(900, 1000);

	func_to_func->pf_mbox_cb[mod] = NULL;
}
/**
 * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 */
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
				 enum hinic_mod_type mod)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);

	while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
			&func_to_func->vf_mbox_cb_state[mod]))
		usleep_range(900, 1000);

	func_to_func->vf_mbox_cb[mod] = NULL;
}
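/* Dispatch a mailbox message received by a VF to the callback registered
 * for its module; the CB_RUNNING bit lets unregistration wait for any
 * in-flight callback to finish.
 */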
static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				struct hinic_recv_mbox *recv_mbox,
				void *buf_out, u16 *out_size)
{
	hinic_vf_mbox_cb cb;
	int ret = 0;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	set_bit(HINIC_VF_MBOX_CB_RUNNING,
		&func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
			   &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
		cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
		   recv_mbox->mbox_len, buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
		ret = -EINVAL;
	}

	clear_bit(HINIC_VF_MBOX_CB_RUNNING,
		  &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}
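/* Dispatch a mailbox message the PF received from one of its VFs to the
 * registered PF callback, translating the global source function index
 * into a VF id relative to this PF.
 */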
static int
recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			     struct hinic_recv_mbox *recv_mbox,
			     u16 src_func_idx, void *buf_out,
			     u16 *out_size)
{
	hinic_pf_mbox_cb cb;
	u16 vf_id = 0;
	int ret;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	set_bit(HINIC_PF_MBOX_CB_RUNNING,
		&func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
			   &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
		vf_id = src_func_idx -
			hinic_glb_pf_vf_offset(func_to_func->hwif);
		ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
			 recv_mbox->mbox, recv_mbox->mbox_len,
			 buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
			recv_mbox->mod);
		ret = -EINVAL;
	}

	clear_bit(HINIC_PF_MBOX_CB_RUNNING,
		  &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}
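/* Segments of a multi-segment message must arrive in order: accept seq_id 0
 * as the start of a new message, otherwise require exactly the previous
 * seq_id plus one.
 */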
static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
					  u8 seq_id, u8 seg_len)
{
	if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
		return false;

	if (seq_id == 0) {
		recv_mbox->seq_id = seq_id;
	} else {
		if (seq_id != recv_mbox->seq_id + 1)
			return false;

		recv_mbox->seq_id = seq_id;
	}

	return true;
}
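/* Complete the waiter in hinic_mbox_to_func() only if the response matches
 * the message id currently in flight; anything else is a stale response
 * that arrived after the sender already gave up.
 */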
static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      struct hinic_recv_mbox *recv_mbox)
{
	spin_lock(&func_to_func->mbox_lock);
	if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
	    func_to_func->event_flag == EVENT_START)
		complete(&recv_mbox->recv_done);
	else
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
			func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
			recv_mbox->msg_info.status);
	spin_unlock(&func_to_func->mbox_lock);
}
static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx);

static void recv_func_mbox_work_handler(struct work_struct *work)
{
	struct hinic_mbox_work *mbox_work =
			container_of(work, struct hinic_mbox_work, work);
	struct hinic_recv_mbox *recv_mbox;

	recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
			       mbox_work->src_func_idx);

	recv_mbox =
		&mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];

	atomic_dec(&recv_mbox->msg_cnt);

	kfree(mbox_work);
}
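/* Reassemble message segments delivered by the AEQ into the per-function
 * receive buffer. Responses complete the waiting sender directly; requests
 * are duplicated and queued to the mbox workqueue for processing.
 */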
static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      void *header, struct hinic_recv_mbox *recv_mbox)
{
	void *mbox_body = MBOX_BODY_FROM_HDR(header);
	struct hinic_recv_mbox *rcv_mbox_temp = NULL;
	u64 mbox_header = *((u64 *)header);
	struct hinic_mbox_work *mbox_work;
	u8 seq_id, seg_len;
	u16 src_func_idx;
	int pos;

	seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
	seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
	src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
			src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
		recv_mbox->seq_id = SEQ_ID_MAX_VAL;
		return;
	}

	pos = seq_id * MBOX_SEG_LEN;
	memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
	       HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));

	if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
		return;

	recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
	recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
	recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
	recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
	recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
	recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
	recv_mbox->seq_id = SEQ_ID_MAX_VAL;

	if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
	    HINIC_HWIF_RESPONSE) {
		resp_mbox_handler(func_to_func, recv_mbox);
		return;
	}

	if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
		dev_warn(&func_to_func->hwif->pdev->dev,
			 "Function %u has %d messages waiting to be processed, can't add to work queue\n",
			 src_func_idx, atomic_read(&recv_mbox->msg_cnt));
		return;
	}

	rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
	if (!rcv_mbox_temp)
		return;

	rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
				      GFP_KERNEL);
	if (!rcv_mbox_temp->mbox)
		goto err_alloc_rcv_mbox_msg;

	rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!rcv_mbox_temp->buf_out)
		goto err_alloc_rcv_mbox_buf;

	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
	if (!mbox_work)
		goto err_alloc_mbox_work;

	mbox_work->func_to_func = func_to_func;
	mbox_work->recv_mbox = rcv_mbox_temp;
	mbox_work->src_func_idx = src_func_idx;

	atomic_inc(&recv_mbox->msg_cnt);
	INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
	queue_work(func_to_func->workq, &mbox_work->work);

	return;

err_alloc_mbox_work:
	kfree(rcv_mbox_temp->buf_out);

err_alloc_rcv_mbox_buf:
	kfree(rcv_mbox_temp->mbox);

err_alloc_rcv_mbox_msg:
	kfree(rcv_mbox_temp);
}
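/* Generate a fresh random id for the given VF mailbox and program it into
 * the management firmware; the old id is kept so that messages sent with
 * the previous value are still accepted during the switch-over.
 */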
static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
	struct hinic_set_random_id rand_info = {0};
	u16 out_size = sizeof(rand_info);
	struct hinic_pfhwdev *pfhwdev;
	int ret;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	rand_info.version = HINIC_CMD_VER_FUNC_ID;
	rand_info.func_idx = func_id;
	rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif);
	rand_info.random_id = get_random_u32();

	func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id;

	ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
				&rand_info, sizeof(rand_info),
				&rand_info, &out_size, HINIC_MGMT_MSG_SYNC);
	if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
	     rand_info.status) || !out_size || ret) {
		dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n",
			ret, rand_info.status, out_size);
		return -EIO;
	}

	if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
		return rand_info.status;

	func_to_func->vf_mbx_old_rand_id[func_id] =
		func_to_func->vf_mbx_rand_id[func_id];

	return 0;
}
static void update_random_id_work_handler(struct work_struct *work)
{
	struct hinic_mbox_work *mbox_work =
			container_of(work, struct hinic_mbox_work, work);
	struct hinic_mbox_func_to_func *func_to_func;
	u16 src = mbox_work->src_func_idx;

	func_to_func = mbox_work->func_to_func;

	if (set_vf_mbox_random_id(func_to_func->hwdev, src))
		dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n",
			 mbox_work->src_func_idx);

	kfree(mbox_work);
}
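/* Check the random id carried in a VF mailbox message against the values
 * reserved by the PF. On mismatch the message is rejected and a workqueue
 * job is scheduled to roll the VF's random id.
 */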
static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
				    u8 *header)
{
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct hinic_mbox_work *mbox_work = NULL;
	u64 mbox_header = *((u64 *)header);
	u16 offset, src;
	u32 random_id;
	int vf_in_pf;

	src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random)
		return true;

	if (!HINIC_IS_PPF(hwdev->hwif)) {
		offset = hinic_glb_pf_vf_offset(hwdev->hwif);
		vf_in_pf = src - offset;

		if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) {
			dev_warn(&hwdev->hwif->pdev->dev,
				 "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n",
				 src, offset + 1,
				 hwdev->nic_cap.max_vf + offset);
			return false;
		}
	}

	random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN +
					 MBOX_HEADER_SZ));

	if (random_id == func_to_func->vf_mbx_rand_id[src] ||
	    random_id == func_to_func->vf_mbx_old_rand_id[src])
		return true;

	dev_warn(&hwdev->hwif->pdev->dev,
		 "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n",
		 random_id, src, func_to_func->vf_mbx_rand_id[src]);

	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
	if (!mbox_work)
		return false;

	mbox_work->func_to_func = func_to_func;
	mbox_work->src_func_idx = src;

	INIT_WORK(&mbox_work->work, update_random_id_work_handler);
	queue_work(func_to_func->workq, &mbox_work->work);

	return false;
}
void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u64 mbox_header = *((u64 *)header);
	struct hinic_recv_mbox *recv_mbox;
	u64 src, dir;

	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;

	dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
	src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (src >= HINIC_MAX_FUNCTIONS) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox source function id:%u is invalid\n", (u32)src);
		return;
	}

	if (!check_vf_mbox_random_id(func_to_func, header))
		return;

	recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
		    &func_to_func->mbox_send[src] :
		    &func_to_func->mbox_resp[src];

	recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
	struct hinic_mbox_func_to_func *func_to_func;
	struct hinic_send_mbox *send_mbox;

	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
	send_mbox = &func_to_func->send_mbox;

	complete(&send_mbox->send_done);
}
static void clear_mbox_status(struct hinic_send_mbox *mbox)
{
	*mbox->wb_status = 0;

	/* clear mailbox write back status */
	wmb();
}
static void mbox_copy_header(struct hinic_hwdev *hwdev,
			     struct hinic_send_mbox *mbox, u64 *header)
{
	u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
	u32 *data = (u32 *)header;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i), mbox->data + i * sizeof(u32));
}
static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
				struct hinic_send_mbox *mbox, void *seg,
				u16 seg_len)
{
	u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
	u32 data_len, chk_sz = sizeof(u32);
	u32 *data = seg;
	u32 i, idx_max;

	/* The mbox message should be aligned to 4 bytes. */
	if (seg_len % chk_sz) {
		memcpy(mbox_max_buf, seg, seg_len);
		data = (u32 *)mbox_max_buf;
	}

	data_len = seg_len;
	idx_max = ALIGN(data_len, chk_sz) / chk_sz;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i),
			     mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
}
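/* Program the mailbox interrupt attributes (destination function, AEQ
 * selection, DMA attributes, TX size) and then kick the control register
 * to start transmission of the segment just copied into the mbox area.
 */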
static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
				u16 dst_func, u16 dst_aeqn, u16 seg_len,
				int poll)
{
	u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
	u32 mbox_int, mbox_ctrl;

	mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
		   HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
		   HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
		   HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
		   HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
					    MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
				      TX_SIZE) |
		   HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
		   HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);

	wmb(); /* writing the mbox int attributes */
	mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);

	if (poll)
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
	else
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
}
static void dump_mox_reg(struct hinic_hwdev *hwdev)
{
	u32 val;

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
		val);
}
static u16 get_mbox_status(struct hinic_send_mbox *mbox)
{
	/* write back is 16B, but only use first 4B */
	u64 wb_val = be64_to_cpu(*mbox->wb_status);

	rmb(); /* verify reading before check */

	return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
}
static int
wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
			     int poll, u16 *wb_status)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u32 cnt = 0;
	unsigned long jif;

	if (poll) {
		while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
			*wb_status = get_mbox_status(send_mbox);
			if (MBOX_STATUS_FINISHED(*wb_status))
				break;

			usleep_range(900, 1000);
			cnt++;
		}

		if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
				*wb_status);
			dump_mox_reg(hwdev);
			return -ETIMEDOUT;
		}
	} else {
		jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
		if (!wait_for_completion_timeout(done, jif)) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
			dump_mox_reg(hwdev);
			hinic_dump_aeq_info(hwdev);
			return -ETIMEDOUT;
		}

		*wb_status = get_mbox_status(send_mbox);
	}

	return 0;
}
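/* Send one segment: copy header and data into the hardware mbox area,
 * trigger transmission, then wait (by polling or AEQ completion) for the
 * write-back status of this segment.
 */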
static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
			 u64 header, u16 dst_func, void *seg, u16 seg_len,
			 int poll, void *msg_info)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
	u16 dst_aeqn, wb_status = 0, errcode;

	if (num_aeqs >= 4)
		dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
			   HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
	else
		dst_aeqn = 0;

	if (!poll)
		init_completion(done);

	clear_mbox_status(send_mbox);

	mbox_copy_header(hwdev, send_mbox, &header);

	mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);

	write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);

	wmb(); /* writing the mbox msg attributes */

	if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
		return -ETIMEDOUT;

	if (!MBOX_STATUS_SUCCESS(wb_status)) {
		dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
			dst_func, wb_status);
		errcode = MBOX_STATUS_ERRCODE(wb_status);
		return errcode ? errcode : -EFAULT;
	}

	return 0;
}
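/* Split a message into MBOX_SEG_LEN-sized segments and send them one by
 * one, updating the SEQID and LAST fields of the 64-bit header as the
 * remaining length shrinks.
 */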
static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
			     enum hinic_mod_type mod, u16 cmd, void *msg,
			     u16 msg_len, u16 dst_func,
			     enum hinic_hwif_direction_type direction,
			     enum hinic_mbox_ack_type ack_type,
			     struct mbox_msg_info *msg_info)
{
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u16 seg_len = MBOX_SEG_LEN;
	u8 *msg_seg = (u8 *)msg;
	u16 left = msg_len;
	u32 seq_id = 0;
	u64 header = 0;
	int err = 0;

	down(&func_to_func->msg_send_sem);

	header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
		 HINIC_MBOX_HEADER_SET(mod, MODULE) |
		 HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
		 HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
		 HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
		 HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
		 HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
		 HINIC_MBOX_HEADER_SET(cmd, CMD) |
		 /* The vf's offset to its associated pf */
		 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
		 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
		 HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
				       SRC_GLB_FUNC_IDX);

	while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
		if (left <= HINIC_MBOX_SEG_LEN) {
			header &= ~MBOX_SEGLEN_MASK;
			header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
			header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);

			seg_len = left;
		}

		err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
				    seg_len, MBOX_SEND_MSG_INT, msg_info);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
				HINIC_MBOX_HEADER_GET(header, SEQID));
			goto err_send_mbox_seg;
		}

		left -= HINIC_MBOX_SEG_LEN;
		msg_seg += HINIC_MBOX_SEG_LEN;

		seq_id++;
		header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
						  SEQID));
		header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
	}

err_send_mbox_seg:
	up(&func_to_func->msg_send_sem);

	return err;
}
static void
response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
			    struct hinic_recv_mbox *recv_mbox, int err,
			    u16 out_size, u16 src_func_idx)
{
	struct mbox_msg_info msg_info = {0};

	if (recv_mbox->ack_type == MBOX_ACK) {
		msg_info.msg_id = recv_mbox->msg_info.msg_id;
		if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
		else if (err == HINIC_MBOX_VF_CMD_ERROR)
			msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
		else if (err)
			msg_info.status = HINIC_MBOX_PF_SEND_ERR;

		/* if there is no data to respond with, set out_size to 1 */
		if (!out_size || err)
			out_size = MBOX_MSG_NO_DATA_LEN;

		send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
				  recv_mbox->buf_out, out_size, src_func_idx,
				  HINIC_HWIF_RESPONSE, MBOX_ACK,
				  &msg_info);
	}
}
static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx)
{
	void *buf_out = recv_mbox->buf_out;
	u16 out_size = MBOX_MAX_BUF_SZ;
	int err = 0;

	if (HINIC_IS_VF(func_to_func->hwif)) {
		err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
					   &out_size);
	} else {
		if (IS_PF_OR_PPF_SRC(src_func_idx))
			dev_warn(&func_to_func->hwif->pdev->dev,
				 "Unsupported pf2pf mbox msg\n");
		else
			err = recv_pf_from_vf_mbox_handler(func_to_func,
							   recv_mbox,
							   src_func_idx,
							   buf_out, &out_size);
	}

	response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
				    src_func_idx);
	kfree(recv_mbox->buf_out);
	kfree(recv_mbox->mbox);
	kfree(recv_mbox);
}
static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
				   enum mbox_event_state event_flag)
{
	spin_lock(&func_to_func->mbox_lock);
	func_to_func->event_flag = event_flag;
	spin_unlock(&func_to_func->mbox_lock);
}
static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
				  struct hinic_recv_mbox *mbox_for_resp,
				  enum hinic_mod_type mod, u16 cmd,
				  void *buf_out, u16 *out_size)
{
	int err;

	if (mbox_for_resp->msg_info.status) {
		err = mbox_for_resp->msg_info.status;
		if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
				mbox_for_resp->msg_info.status);
		return err;
	}

	if (buf_out && out_size) {
		if (*out_size < mbox_for_resp->mbox_len) {
			dev_err(&func_to_func->hwif->pdev->dev,
				"Invalid response mbox message length: %d for mod %d cmd %d, should be less than: %d\n",
				mbox_for_resp->mbox_len, mod, cmd, *out_size);
			return -EFAULT;
		}

		if (mbox_for_resp->mbox_len)
			memcpy(buf_out, mbox_for_resp->mbox,
			       mbox_for_resp->mbox_len);

		*out_size = mbox_for_resp->mbox_len;
	}

	return 0;
}
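/**
 * hinic_mbox_to_func - send a mailbox message to a function and wait for
 * its response
 * @func_to_func: mailbox function to function struct
 * @mod: specific mod that the message belongs to
 * @cmd: command of the message
 * @dst_func: destination function index
 * @buf_in: the message data
 * @in_size: the message data length
 * @buf_out: response buffer
 * @out_size: response buffer size on input, response length on output
 * @timeout: timeout in msecs, 0 means use the default HINIC_MBOX_COMP_TIME
 * Return: 0 - success, negative - failure
 */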
int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
		       enum hinic_mod_type mod, u16 cmd, u16 dst_func,
		       void *buf_in, u16 in_size, void *buf_out,
		       u16 *out_size, u32 timeout)
{
	struct hinic_recv_mbox *mbox_for_resp;
	struct mbox_msg_info msg_info = {0};
	unsigned long timeo;
	int err;

	mbox_for_resp = &func_to_func->mbox_resp[dst_func];

	down(&func_to_func->mbox_send_sem);

	init_completion(&mbox_for_resp->recv_done);

	msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);

	set_mbox_to_func_event(func_to_func, EVENT_START);

	err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
				dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
				&msg_info);
	if (err) {
		dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
			msg_info.msg_id);
		set_mbox_to_func_event(func_to_func, EVENT_FAIL);
		goto err_send_mbox;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
	if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
		set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
		dev_err(&func_to_func->hwif->pdev->dev,
			"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
		hinic_dump_aeq_info(func_to_func->hwdev);
		err = -ETIMEDOUT;
		goto err_send_mbox;
	}

	set_mbox_to_func_event(func_to_func, EVENT_END);

	err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
				     buf_out, out_size);

err_send_mbox:
	up(&func_to_func->mbox_send_sem);

	return err;
}
static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
				  void *buf_in, u16 in_size)
{
	if (in_size > HINIC_MBOX_DATA_SIZE) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox msg len(%d) exceeds limit(%d)\n",
			in_size, HINIC_MBOX_DATA_SIZE);
		return -EINVAL;
	}

	return 0;
}
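/**
 * hinic_mbox_to_pf - send a mailbox message from a VF to its PF
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the message belongs to
 * @cmd: command of the message
 * @buf_in: the message data
 * @in_size: the message data length
 * @buf_out: response buffer
 * @out_size: response buffer size on input, response length on output
 * @timeout: timeout in msecs, 0 means use the default
 * Return: 0 - success, negative - failure
 */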
int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
	int err = mbox_func_params_valid(func_to_func, buf_in, in_size);

	if (err)
		return err;

	if (!HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	return hinic_mbox_to_func(func_to_func, mod, cmd,
				  hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
				  in_size, buf_out, out_size, timeout);
}
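/**
 * hinic_mbox_to_vf - send a mailbox message from the PF to one of its VFs
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the message belongs to
 * @vf_id: VF id relative to this PF, starting from 1
 * @cmd: command of the message
 * @buf_in: the message data
 * @in_size: the message data length
 * @buf_out: response buffer
 * @out_size: response buffer size on input, response length on output
 * @timeout: timeout in msecs, 0 means use the default
 * Return: 0 - success, negative - failure
 */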
int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u16 dst_func_idx;
	int err;

	if (!hwdev)
		return -EINVAL;

	func_to_func = hwdev->func_to_func;
	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
	if (err)
		return err;

	if (HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	if (!vf_id) {
		dev_err(&hwdev->hwif->pdev->dev,
			"VF id(%d) error!\n", vf_id);
		return -EINVAL;
	}

	/* vf_offset_to_pf + vf_id is the vf's global function id of vf in
	 * this pf
	 */
	dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;

	return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
				  in_size, buf_out, out_size, timeout);
}
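/* Usage sketch (illustrative only; "example_msg" and EXAMPLE_CMD are
 * placeholders, not types or commands defined by this driver):
 *
 *	struct example_msg req = {0}, resp = {0};
 *	u16 out_size = sizeof(resp);
 *	int err;
 *
 *	err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC, 1, EXAMPLE_CMD,
 *			       &req, sizeof(req), &resp, &out_size, 0);
 *
 * On success, resp holds out_size bytes of response data from the VF.
 */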
static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	int err;

	mbox_info->seq_id = SEQ_ID_MAX_VAL;

	mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!mbox_info->mbox)
		return -ENOMEM;

	mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!mbox_info->buf_out) {
		err = -ENOMEM;
		goto err_alloc_buf_out;
	}

	atomic_set(&mbox_info->msg_cnt, 0);

	return 0;

err_alloc_buf_out:
	kfree(mbox_info->mbox);

	return err;
}
static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	kfree(mbox_info->buf_out);
	kfree(mbox_info->mbox);
}
static int alloc_mbox_info(struct hinic_hwdev *hwdev,
			   struct hinic_recv_mbox *mbox_info)
{
	u16 func_idx, i;
	int err;

	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
		err = init_mbox_info(&mbox_info[func_idx]);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
				func_idx);
			goto err_init_mbox_info;
		}
	}

	return 0;

err_init_mbox_info:
	for (i = 0; i < func_idx; i++)
		clean_mbox_info(&mbox_info[i]);

	return err;
}
static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	u16 func_idx;

	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
		clean_mbox_info(&mbox_info[func_idx]);
}

static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;

	send_mbox->data = MBOX_AREA(func_to_func->hwif);
}
static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u32 addr_h, addr_l;

	send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
						 MBOX_WB_STATUS_LEN,
						 &send_mbox->wb_paddr,
						 GFP_KERNEL);
	if (!send_mbox->wb_vaddr)
		return -ENOMEM;

	send_mbox->wb_status = send_mbox->wb_vaddr;

	addr_h = upper_32_bits(send_mbox->wb_paddr);
	addr_l = lower_32_bits(send_mbox->wb_paddr);

	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
			     addr_h);
	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
			     addr_l);

	return 0;
}
static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;

	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
			     0);
	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
			     0);

	dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
			  send_mbox->wb_vaddr,
			  send_mbox->wb_paddr);
}
bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
				struct vf_cmd_check_handle *cmd_handle,
				u16 vf_id, u8 cmd, void *buf_in,
				u16 in_size, u8 size)
{
	u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif);
	int i;

	for (i = 0; i < size; i++) {
		if (cmd == cmd_handle[i].cmd) {
			if (cmd_handle[i].check_cmd)
				return cmd_handle[i].check_cmd(hwdev, src_idx,
							       buf_in, in_size);
			else
				return true;
		}
	}

	dev_err(&hwdev->hwif->pdev->dev,
		"PF Receive VF(%d) unsupported cmd(0x%x)\n",
		vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd);

	return false;
}
static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
				     struct hinic_cmdq_ctxt *cmdq_ctxt)
{
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	u64 curr_pg_pfn, wq_block_pfn;

	if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) ||
	    cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES)
		return false;

	curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET
		(ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN);
	wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET
		(ctxt_info->wq_block_pfn, WQ_BLOCK_PFN);
	/* VF must use 0-level CLA */
	if (curr_pg_pfn != wq_block_pfn)
		return false;

	return true;
}
static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
			    void *buf_in, u16 in_size)
{
	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
		return false;

	return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
}
#define HW_CTX_QPS_VALID(hw_ctxt)	\
		((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH &&	\
		(hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH &&	\
		(hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH &&	\
		(hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH &&	\
		(hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE)

static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt)
{
	if (HW_CTX_QPS_VALID(hw_ctxt))
		return true;

	if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
	    !hw_ctxt->rx_buf_sz_idx)
		return true;

	return false;
}
static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx,
			 void *buf_in, u16 in_size)
{
	struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in;

	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
		return false;

	if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
		return false;

	if (hw_ctxt->set_cmdq_depth) {
		if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
		    hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
			return true;

		return false;
	}

	return hw_ctxt_qps_param_valid(hw_ctxt);
}
static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
				   void *buf_in, u16 in_size)
{
	struct hinic_wq_page_size *page_size_info = buf_in;

	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
		return false;

	if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
		return false;

	if (((1U << page_size_info->page_size) * SZ_4K) !=
	    HINIC_DEFAULT_WQ_PAGE_SIZE)
		return false;

	return true;
}
static struct vf_cmd_check_handle hw_cmd_support_vf[] = {
	{HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
	{HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
	{HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt},
	{HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt},
	{HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size},
};
static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
				u16 in_size, void *buf_out, u16 *out_size)
{
	u8 size = ARRAY_SIZE(hw_cmd_support_vf);
	struct hinic_hwdev *hwdev = handle;
	struct hinic_pfhwdev *pfhwdev;
	int err = 0;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
					buf_in, in_size, size)) {
		dev_err(&hwdev->hwif->pdev->dev,
			"PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n",
			vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd,
			in_size);
		return HINIC_MBOX_VF_CMD_ERROR;
	}

	if (cmd == HINIC_COMM_CMD_START_FLR) {
		*out_size = 0;
	} else {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					cmd, buf_in, in_size, buf_out, out_size,
					HINIC_MGMT_MSG_SYNC);
		if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&hwdev->hwif->pdev->dev,
				"PF mbox common callback handler err: %d\n",
				err);
	}

	return err;
}
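/**
 * hinic_func_to_func_init - initialize the function to function mailbox
 * @hwdev: the pointer to hw device
 * Return: 0 - success, negative - failure
 */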
int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
{
	struct hinic_mbox_func_to_func *func_to_func;
	struct hinic_pfhwdev *pfhwdev;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
	func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
	if (!func_to_func)
		return -ENOMEM;

	hwdev->func_to_func = func_to_func;
	func_to_func->hwdev = hwdev;
	func_to_func->hwif = hwdev->hwif;
	sema_init(&func_to_func->mbox_send_sem, 1);
	sema_init(&func_to_func->msg_send_sem, 1);
	spin_lock_init(&func_to_func->mbox_lock);
	func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
	if (!func_to_func->workq) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
		err = -ENOMEM;
		goto err_create_mbox_workq;
	}

	err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
		goto err_alloc_mbox_for_send;
	}

	err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
		goto err_alloc_mbox_for_resp;
	}

	err = alloc_mbox_wb_status(func_to_func);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
		goto err_alloc_wb_status;
	}

	prepare_send_mbox(func_to_func);

	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
				 &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
				 &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);

	if (!HINIC_IS_VF(hwdev->hwif))
		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
					  comm_pf_mbox_handler);

	return 0;

err_alloc_wb_status:
	free_mbox_info(func_to_func->mbox_resp);

err_alloc_mbox_for_resp:
	free_mbox_info(func_to_func->mbox_send);

err_alloc_mbox_for_send:
	destroy_workqueue(func_to_func->workq);

err_create_mbox_workq:
	kfree(func_to_func);

	return err;
}
void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);

	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
	/* destroy the workqueue before freeing related mbox resources in case
	 * of illegal resource access
	 */
	destroy_workqueue(func_to_func->workq);

	free_mbox_wb_status(func_to_func);
	free_mbox_info(func_to_func->mbox_resp);
	free_mbox_info(func_to_func->mbox_send);

	kfree(func_to_func);
}
int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
{
	u16 vf_offset;
	u8 vf_in_pf;
	int err = 0;

	if (HINIC_IS_VF(hwdev->hwif))
		return 0;

	vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif);

	for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) {
		err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf);
		if (err)
			break;
	}

	if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
		hwdev->func_to_func->support_vf_random = false;
		err = 0;
		dev_warn(&hwdev->hwif->pdev->dev, "Mgmt doesn't support setting VF%d random id\n",
			 vf_in_pf - 1);
	} else if (!err) {
		hwdev->func_to_func->support_vf_random = true;
	}

	return err;
}