1 // SPDX-License-Identifier: GPL-2.0-only
3 * Huawei HiNIC PCI Express Linux driver
4 * Copyright(c) 2017 Huawei Technologies Co., Ltd
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/errno.h>
10 #include <linux/pci.h>
11 #include <linux/device.h>
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
14 #include <linux/spinlock.h>
15 #include <linux/sizes.h>
16 #include <linux/atomic.h>
17 #include <linux/log2.h>
19 #include <linux/completion.h>
20 #include <linux/err.h>
21 #include <asm/byteorder.h>
22 #include <asm/barrier.h>
24 #include "hinic_common.h"
25 #include "hinic_hw_if.h"
26 #include "hinic_hw_eqs.h"
27 #include "hinic_hw_mgmt.h"
28 #include "hinic_hw_wqe.h"
29 #include "hinic_hw_wq.h"
30 #include "hinic_hw_cmdq.h"
31 #include "hinic_hw_io.h"
32 #include "hinic_hw_dev.h"
34 #define CMDQ_CEQE_TYPE_SHIFT 0
36 #define CMDQ_CEQE_TYPE_MASK 0x7
38 #define CMDQ_CEQE_GET(val, member) \
39 (((val) >> CMDQ_CEQE_##member##_SHIFT) \
40 & CMDQ_CEQE_##member##_MASK)
42 #define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
44 #define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
46 #define CMDQ_WQE_ERRCODE_GET(val, member) \
47 (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
48 & CMDQ_WQE_ERRCODE_##member##_MASK)
50 #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
52 #define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
54 #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
56 #define CMDQ_WQE_COMPLETED(ctrl_info) \
57 HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
59 #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
61 #define CMDQ_DB_OFF SZ_2K
63 #define CMDQ_WQEBB_SIZE 64
64 #define CMDQ_WQE_SIZE 64
65 #define CMDQ_DEPTH SZ_4K
67 #define CMDQ_WQ_PAGE_SIZE SZ_256K
69 #define WQE_LCMD_SIZE 64
70 #define WQE_SCMD_SIZE 64
72 #define COMPLETE_LEN 3
74 #define CMDQ_TIMEOUT 1000
76 #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
78 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
79 struct hinic_cmdqs, cmdq[0])
81 #define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
82 struct hinic_func_to_io, \
90 enum completion_format
{
101 BUFDESC_LCMD_LEN
= 2, /* 16 bytes - 2(8 byte unit) */
102 BUFDESC_SCMD_LEN
= 3, /* 24 bytes - 3(8 byte unit) */
106 CTRL_SECT_LEN
= 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
107 CTRL_DIRECT_SECT_LEN
= 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
110 enum cmdq_scmd_type
{
111 CMDQ_SET_ARM_CMD
= 2,
115 CMDQ_CMD_SYNC_DIRECT_RESP
= 0,
116 CMDQ_CMD_SYNC_SGE_RESP
= 1,
119 enum completion_request
{
/**
 * hinic_alloc_cmdq_buf - alloc buffer for sending command
 * @cmdqs: the cmdqs that own the buffer pool
 * @cmdq_buf: the buffer returned in this struct
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;

	/* Coherent DMA buffer from the per-cmdqs pool; the caller owns it
	 * and must release it with hinic_free_cmdq_buf().
	 */
	cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
				       &cmdq_buf->dma_addr);
	if (!cmdq_buf->buf) {
		dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
		return -ENOMEM;
	}

	return 0;
}
/**
 * hinic_free_cmdq_buf - free buffer
 * @cmdqs: the cmdqs that own the buffer pool
 * @cmdq_buf: the buffer to free that is in this struct
 **/
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	/* Return the coherent DMA buffer to the pool it came from */
	dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}
158 static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len
)
160 unsigned int wqe_size
= 0;
163 case BUFDESC_LCMD_LEN
:
164 wqe_size
= WQE_LCMD_SIZE
;
166 case BUFDESC_SCMD_LEN
:
167 wqe_size
= WQE_SCMD_SIZE
;
/* Point the completion's SGE section at the caller's response buffer so the
 * HW can DMA the response into it.
 */
static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
				    struct hinic_cmdq_buf *buf_out)
{
	struct hinic_sge_resp *sge_resp = &completion->sge_resp;

	hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
}
182 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe
*wqe
, int wrapped
,
183 enum hinic_cmd_ack_type ack_type
,
184 enum hinic_mod_type mod
, u8 cmd
, u16 prod_idx
,
185 enum completion_format complete_format
,
186 enum data_format data_format
,
187 enum bufdesc_len buf_len
)
189 struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
;
190 struct hinic_cmdq_wqe_scmd
*wqe_scmd
;
191 enum ctrl_sect_len ctrl_len
;
192 struct hinic_ctrl
*ctrl
;
195 if (data_format
== DATA_SGE
) {
196 wqe_lcmd
= &wqe
->wqe_lcmd
;
198 wqe_lcmd
->status
.status_info
= 0;
199 ctrl
= &wqe_lcmd
->ctrl
;
200 ctrl_len
= CTRL_SECT_LEN
;
202 wqe_scmd
= &wqe
->direct_wqe
.wqe_scmd
;
204 wqe_scmd
->status
.status_info
= 0;
205 ctrl
= &wqe_scmd
->ctrl
;
206 ctrl_len
= CTRL_DIRECT_SECT_LEN
;
209 ctrl
->ctrl_info
= HINIC_CMDQ_CTRL_SET(prod_idx
, PI
) |
210 HINIC_CMDQ_CTRL_SET(cmd
, CMD
) |
211 HINIC_CMDQ_CTRL_SET(mod
, MOD
) |
212 HINIC_CMDQ_CTRL_SET(ack_type
, ACK_TYPE
);
214 CMDQ_WQE_HEADER(wqe
)->header_info
=
215 HINIC_CMDQ_WQE_HEADER_SET(buf_len
, BUFDESC_LEN
) |
216 HINIC_CMDQ_WQE_HEADER_SET(complete_format
, COMPLETE_FMT
) |
217 HINIC_CMDQ_WQE_HEADER_SET(data_format
, DATA_FMT
) |
218 HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET
, COMPLETE_REQ
) |
219 HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN
, COMPLETE_SECT_LEN
) |
220 HINIC_CMDQ_WQE_HEADER_SET(ctrl_len
, CTRL_LEN
) |
221 HINIC_CMDQ_WQE_HEADER_SET(wrapped
, TOGGLED_WRAPPED
);
223 saved_data
= CMDQ_WQE_HEADER(wqe
)->saved_data
;
224 saved_data
= HINIC_SAVED_DATA_CLEAR(saved_data
, ARM
);
226 if ((cmd
== CMDQ_SET_ARM_CMD
) && (mod
== HINIC_MOD_COMM
))
227 CMDQ_WQE_HEADER(wqe
)->saved_data
|=
228 HINIC_SAVED_DATA_SET(1, ARM
);
230 CMDQ_WQE_HEADER(wqe
)->saved_data
= saved_data
;
/* Describe the command input buffer via an SGE in a large-command WQE */
static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
				  struct hinic_cmdq_buf *buf_in)
{
	hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
}
/* Copy the command data inline into a direct (small command) WQE.
 * NOTE(review): in_size is assumed to fit the inline data area of the
 * scmd buf_desc - callers are expected to enforce the size limit.
 */
static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
				     void *buf_in, u32 in_size)
{
	struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;

	wqe_scmd->buf_desc.buf_len = in_size;
	memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
}
/* Build a complete large-command (SGE data) WQE: completion section,
 * ctrl/header, and the SGE buffer descriptor for the input buffer.
 */
static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
			      enum cmdq_cmd_type cmd_type,
			      struct hinic_cmdq_buf *buf_in,
			      struct hinic_cmdq_buf *buf_out, int wrapped,
			      enum hinic_cmd_ack_type ack_type,
			      enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
	enum completion_format complete_format;

	switch (cmd_type) {
	case CMDQ_CMD_SYNC_SGE_RESP:
		/* response is DMA-ed into buf_out via an SGE */
		complete_format = COMPLETE_SGE;
		cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
		break;

	case CMDQ_CMD_SYNC_DIRECT_RESP:
		/* response returned inline in the completion section */
		complete_format = COMPLETE_DIRECT;
		wqe_lcmd->completion.direct_resp = 0;
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
			      prod_idx, complete_format, DATA_SGE,
			      BUFDESC_LCMD_LEN);

	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}
/* Build a complete direct (inline data) WQE: completion section,
 * ctrl/header, and the inline copy of the input data.
 */
static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
				enum cmdq_cmd_type cmd_type,
				void *buf_in, u16 in_size,
				struct hinic_cmdq_buf *buf_out, int wrapped,
				enum hinic_cmd_ack_type ack_type,
				enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
	struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
	enum completion_format complete_format;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;

	wqe_scmd = &direct_wqe->wqe_scmd;

	switch (cmd_type) {
	case CMDQ_CMD_SYNC_SGE_RESP:
		/* response is DMA-ed into buf_out via an SGE */
		complete_format = COMPLETE_SGE;
		cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
		break;

	case CMDQ_CMD_SYNC_DIRECT_RESP:
		/* response returned inline in the completion section */
		complete_format = COMPLETE_DIRECT;
		wqe_scmd->completion.direct_resp = 0;
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
			      complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);

	cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
}
/* Copy a prepared WQE into the ring so that the first 8 bytes (the header,
 * which contains the HW ownership/busy information) become visible to HW
 * only after the rest of the WQE is in place.
 */
static void cmdq_wqe_fill(void *dst, void *src)
{
	memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
	       CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

	wmb();		/* The first 8 bytes should be written last */

	*(u64 *)dst = *(u64 *)src;
}
/* Compose the doorbell info word: high byte of the producer index plus
 * path/queue-type/doorbell-type fields. The low byte of the producer index
 * is encoded in the doorbell address (see CMDQ_DB_ADDR).
 */
static void cmdq_fill_db(u32 *db_info,
			 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	*db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
		   HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH)              |
		   HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE)               |
		   HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
}
/* Ring the cmdq doorbell for the given producer index, after making sure
 * all prior WQE writes are visible to the device.
 */
static void cmdq_set_db(struct hinic_cmdq *cmdq,
			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	u32 db_info;

	cmdq_fill_db(&db_info, cmdq_type, prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	db_info = cpu_to_be32(db_info);

	wmb();	/* write all before the doorbell */

	writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}
/* Send a command synchronously and wait for its direct (inline) response.
 *
 * Locking: cmdq_lock (bh) protects the producer index, the per-slot
 * done/errcode registration, and the doorbell ordering against the CEQ
 * tasklet. The lock is dropped before sleeping on the completion.
 *
 * Return 0 - Success, negative - Failure (-EBUSY no WQE available,
 * -ETIMEDOUT no completion within CMDQ_TIMEOUT ms, -EFAULT HW error code)
 */
static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
				     enum hinic_mod_type mod, u8 cmd,
				     struct hinic_cmdq_buf *buf_in, u64 *resp)
{
	struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
	u16 curr_prod_idx, next_prod_idx;
	int errcode, wrapped, num_wqebbs;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_hw_wqe *hw_wqe;
	struct completion done;

	/* Keep doorbell index correct. bh - for tasklet(ceq). */
	spin_lock_bh(&cmdq->cmdq_lock);

	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
	hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
	if (IS_ERR(hw_wqe)) {
		spin_unlock_bh(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

	wrapped = cmdq->wrapped;

	num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		/* producer index wrapped - toggle the wrap state bit */
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	/* register this slot so the CEQ handler can report back to us */
	cmdq->errcode[curr_prod_idx] = &errcode;

	init_completion(&done);
	cmdq->done[curr_prod_idx] = &done;

	cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
			  wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
			  curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);

	/* CMDQ WQE is not shadow, therefore wqe will be written to wq */
	cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	spin_unlock_bh(&cmdq->cmdq_lock);

	if (!wait_for_completion_timeout(&done,
					 msecs_to_jiffies(CMDQ_TIMEOUT))) {
		spin_lock_bh(&cmdq->cmdq_lock);

		/* unregister only if the handler did not already consume
		 * our slot (guards against a late completion racing in)
		 */
		if (cmdq->errcode[curr_prod_idx] == &errcode)
			cmdq->errcode[curr_prod_idx] = NULL;

		if (cmdq->done[curr_prod_idx] == &done)
			cmdq->done[curr_prod_idx] = NULL;

		spin_unlock_bh(&cmdq->cmdq_lock);

		hinic_dump_ceq_info(cmdq->hwdev);
		return -ETIMEDOUT;
	}

	smp_rmb();	/* read error code after completion */

	if (resp) {
		struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;

		*resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
	}

	if (errcode != 0)
		return -EFAULT;

	return 0;
}
/* Post a fire-and-forget SET_ARM command WQE (no completion is awaited).
 * Runs under the plain spinlock variant since it is called from the CEQ
 * handler context as well.
 */
static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
			    u16 in_size)
{
	struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
	u16 curr_prod_idx, next_prod_idx;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_hw_wqe *hw_wqe;
	int wrapped, num_wqebbs;

	/* Keep doorbell index correct */
	spin_lock(&cmdq->cmdq_lock);

	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
	hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
	if (IS_ERR(hw_wqe)) {
		spin_unlock(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

	wrapped = cmdq->wrapped;

	num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		/* producer index wrapped - toggle the wrap state bit */
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
			    in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
			    HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);

	/* cmdq wqe is not shadow, therefore wqe will be written to wq */
	cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	spin_unlock(&cmdq->cmdq_lock);
	return 0;
}
468 static int cmdq_params_valid(struct hinic_cmdq_buf
*buf_in
)
470 if (buf_in
->size
> HINIC_CMDQ_MAX_DATA_SIZE
)
/**
 * hinic_cmdq_direct_resp - send command with direct data as resp
 * @cmdqs: the cmdqs
 * @mod: module on the card that will handle the command
 * @cmd: the command
 * @buf_in: the buffer for the command
 * @resp: the response to return
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
			   enum hinic_mod_type mod, u8 cmd,
			   struct hinic_cmdq_buf *buf_in, u64 *resp)
{
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int err;

	err = cmdq_params_valid(buf_in);
	if (err) {
		dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
		return err;
	}

	/* only the SYNC cmdq is used for direct-response commands */
	return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
					 mod, cmd, buf_in, resp);
}
/**
 * hinic_set_arm_bit - set arm bit for enable interrupt again
 * @cmdqs: the cmdqs
 * @q_type: type of queue to set the arm bit for
 * @q_id: the queue number
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
		      enum hinic_set_arm_qtype q_type, u32 q_id)
{
	struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_arm_bit arm_bit;
	int err;

	arm_bit.q_type = q_type;
	arm_bit.q_id   = q_id;

	err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
	if (err) {
		dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
		return err;
	}

	return 0;
}
/* Hand the WQE back to HW by clearing its ctrl word (which carries the
 * HW busy bit). The WQE layout is derived from its own header, since both
 * large and direct WQEs can appear on the queue.
 */
static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
				   struct hinic_cmdq_wqe *wqe)
{
	u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
	unsigned int bufdesc_len, wqe_size;
	struct hinic_ctrl *ctrl;

	bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
	wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
	if (wqe_size == WQE_LCMD_SIZE) {
		struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;

		ctrl = &wqe_lcmd->ctrl;
	} else {
		struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
		struct hinic_cmdq_wqe_scmd *wqe_scmd;

		wqe_scmd = &direct_wqe->wqe_scmd;
		ctrl = &wqe_scmd->ctrl;
	}

	/* clear HW busy bit */
	ctrl->ctrl_info = 0;

	wmb();	/* verify wqe is clear */
}
/**
 * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
 * @cmdq: the cmdq of the arm command
 * @wqe: the wqe of the arm command
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
				struct hinic_cmdq_wqe *wqe)
{
	struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;
	struct hinic_ctrl *ctrl;
	u32 ctrl_info;

	wqe_scmd = &direct_wqe->wqe_scmd;
	ctrl = &wqe_scmd->ctrl;
	ctrl_info = be32_to_cpu(ctrl->ctrl_info);

	/* HW should toggle the HW BUSY BIT */
	if (!CMDQ_WQE_COMPLETED(ctrl_info))
		return -EBUSY;

	clear_wqe_complete_bit(cmdq, wqe);

	hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
	return 0;
}
589 static void cmdq_update_errcode(struct hinic_cmdq
*cmdq
, u16 prod_idx
,
592 if (cmdq
->errcode
[prod_idx
])
593 *cmdq
->errcode
[prod_idx
] = errcode
;
/**
 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
 * @cmdq: the cmdq of the command
 * @cons_idx: the consumer index to update the error code for
 * @errcode: the error code
 **/
static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
				  int errcode)
{
	/* the slot that the sender registered under its producer index */
	u16 prod_idx = cons_idx;

	spin_lock(&cmdq->cmdq_lock);
	cmdq_update_errcode(cmdq, prod_idx, errcode);

	wmb();	/* write all before update for the command request */

	if (cmdq->done[prod_idx])
		complete(cmdq->done[prod_idx]);
	spin_unlock(&cmdq->cmdq_lock);
}
/* Completion handler for a regular (large command) WQE: extract the HW
 * error code, wake the synchronous waiter, and recycle the WQE.
 *
 * Return 0 - Success, -EBUSY - WQE not yet completed by HW
 */
static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
				struct hinic_cmdq_wqe *cmdq_wqe)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
	struct hinic_status *status = &wqe_lcmd->status;
	struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
	int errcode;

	if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
		return -EBUSY;

	dma_rmb();	/* read status only after the ownership check */

	errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);

	cmdq_sync_cmd_handler(cmdq, ci, errcode);

	clear_wqe_complete_bit(cmdq, cmdq_wqe);
	hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
	return 0;
}
/**
 * cmdq_ceq_handler - cmdq completion event handler
 * @handle: private data for the handler(cmdqs)
 * @ceqe_data: ceq element data
 **/
static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
	enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
	struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
	struct hinic_cmdq_header *header;
	struct hinic_hw_wqe *hw_wqe;
	int err, set_arm = 0;
	u32 saved_data;
	u16 ci;

	/* Read the smallest wqe size for getting wqe size */
	while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
		saved_data = be32_to_cpu(header->saved_data);

		if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
			/* arm_bit was set until here */
			set_arm = 0;

			if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
				break;
		} else {
			/* a regular command completed - re-arm afterwards */
			set_arm = 1;

			/* re-read at the real (large) WQE size */
			hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
			if (IS_ERR(hw_wqe))
				break;

			if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
				break;
		}
	}

	if (set_arm) {
		struct hinic_hwif *hwif = cmdqs->hwif;
		struct pci_dev *pdev = hwif->pdev;

		err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
		if (err)
			dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
	}
}
/**
 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
 * @cmdq_ctxt: cmdq ctxt to initialize
 * @cmdq: the cmdq
 * @cmdq_pages: the memory of the queue
 **/
static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
				 struct hinic_cmdq *cmdq,
				 struct hinic_cmdq_pages *cmdq_pages)
{
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
	struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
	struct hinic_wq *wq = cmdq->wq;

	/* The data in the HW is in Big Endian Format */
	wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);

	/* HW expects the current WQE page as a 4K-page frame number */
	pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);

	ctxt_info->curr_wqe_page_pfn =
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);

	if (wq->num_q_pages != 1) {
		/* block PFN - Read Modify Write */
		cmdq_first_block_paddr = cmdq_pages->page_paddr;

		pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
	}

	ctxt_info->wq_block_pfn =
		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);

	cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
	cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
	cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}
/**
 * init_cmdq - initialize cmdq
 * @cmdq: the cmdq
 * @wq: the wq attaced to the cmdq
 * @q_type: the cmdq type of the cmdq
 * @db_area: doorbell area for the cmdq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
		     enum hinic_cmdq_type q_type, void __iomem *db_area)
{
	int err;

	cmdq->wq = wq;
	cmdq->cmdq_type = q_type;
	cmdq->wrapped = 1;

	spin_lock_init(&cmdq->cmdq_lock);

	/* per-WQE-slot completion pointers for synchronous waiters */
	cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
	if (!cmdq->done)
		return -ENOMEM;

	/* per-WQE-slot error code pointers, filled in by the CEQ handler */
	cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
					   wq->q_depth));
	if (!cmdq->errcode) {
		err = -ENOMEM;
		goto err_errcode;
	}

	cmdq->db_base = db_area + CMDQ_DB_OFF;
	return 0;

err_errcode:
	vfree(cmdq->done);
	return err;
}
/**
 * free_cmdq - Free cmdq
 * @cmdq: the cmdq to free
 **/
static void free_cmdq(struct hinic_cmdq *cmdq)
{
	vfree(cmdq->errcode);
	vfree(cmdq->done);
}
/**
 * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
 * @hwdev: the NIC HW device
 * @cmdqs: cmdqs to write the ctxts for
 * @db_area: db_area for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
			   struct hinic_cmdqs *cmdqs, void __iomem **db_area)
{
	struct hinic_hwif *hwif = hwdev->hwif;
	enum hinic_cmdq_type type, cmdq_type;
	struct hinic_cmdq_ctxt *cmdq_ctxts;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_pfhwdev *pfhwdev;
	size_t cmdq_ctxts_size;
	int err;

	cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
	cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
	if (!cmdq_ctxts)
		return -ENOMEM;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	/* First pass: initialize every cmdq and build its HW context */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		cmdqs->cmdq[cmdq_type].hwdev = hwdev;
		err = init_cmdq(&cmdqs->cmdq[cmdq_type],
				&cmdqs->saved_wqs[cmdq_type], cmdq_type,
				db_area[cmdq_type]);
		if (err) {
			dev_err(&pdev->dev, "Failed to initialize cmdq\n");
			goto err_init_cmdq;
		}

		cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
				     &cmdqs->cmdq[cmdq_type],
				     &cmdqs->cmdq_pages);
	}

	/* Write the CMDQ ctxts */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					HINIC_COMM_CMD_CMDQ_CTXT_SET,
					&cmdq_ctxts[cmdq_type],
					sizeof(cmdq_ctxts[cmdq_type]),
					NULL, NULL, HINIC_MGMT_MSG_SYNC);
		if (err) {
			dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
				cmdq_type);
			goto err_write_cmdq_ctxt;
		}
	}

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return 0;

err_write_cmdq_ctxt:
	/* all cmdqs were initialized - free them all below */
	cmdq_type = HINIC_MAX_CMDQ_TYPES;

err_init_cmdq:
	for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
		free_cmdq(&cmdqs->cmdq[type]);

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return err;
}
/* Tell the management firmware the cmdq depth (as log2) via the HW io
 * context message. cmdq_depth is assumed to be a power of two (CMDQ_DEPTH).
 */
static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
{
	struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
	struct hinic_pfhwdev *pfhwdev;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
	hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);

	hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
	/* depth is communicated as log2 of the number of entries */
	hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);

	return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				 HINIC_COMM_CMD_HWCTXT_SET,
				 &hw_ioctxt, sizeof(hw_ioctxt), NULL,
				 NULL, HINIC_MGMT_MSG_SYNC);
}
/**
 * hinic_init_cmdqs - init all cmdqs
 * @cmdqs: cmdqs to init
 * @hwif: HW interface for accessing cmdqs
 * @db_area: doorbell areas for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
		     void __iomem **db_area)
{
	struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_hwdev *hwdev;
	size_t saved_wqs_size;
	u16 max_wqe_size;
	int err;

	cmdqs->hwif = hwif;
	cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
					       HINIC_CMDQ_BUF_SIZE,
					       HINIC_CMDQ_BUF_SIZE, 0);
	if (!cmdqs->cmdq_buf_pool)
		return -ENOMEM;

	saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
	cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
	if (!cmdqs->saved_wqs) {
		err = -ENOMEM;
		goto err_saved_wqs;
	}

	max_wqe_size = WQE_LCMD_SIZE;
	err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
				   HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
				   CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
		goto err_cmdq_wqs;
	}

	hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
	err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
		goto err_cmdq_ctxt;
	}

	/* from here on, completions arrive via the CEQ callback */
	hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
			      cmdq_ceq_handler);

	err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
	if (err) {
		dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
		goto err_set_cmdq_depth;
	}

	return 0;

err_set_cmdq_depth:
	hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);

err_cmdq_ctxt:
	hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
			    HINIC_MAX_CMDQ_TYPES);

err_cmdq_wqs:
	devm_kfree(&pdev->dev, cmdqs->saved_wqs);

err_saved_wqs:
	dma_pool_destroy(cmdqs->cmdq_buf_pool);
	return err;
}
/**
 * hinic_free_cmdqs - free all cmdqs
 * @cmdqs: cmdqs to free
 **/
void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
{
	struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq_type;

	/* stop completion delivery before tearing down the queues */
	hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);

	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
		free_cmdq(&cmdqs->cmdq[cmdq_type]);

	hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
			    HINIC_MAX_CMDQ_TYPES);

	devm_kfree(&pdev->dev, cmdqs->saved_wqs);

	dma_pool_destroy(cmdqs->cmdq_buf_pool);
}