1 // SPDX-License-Identifier: GPL-2.0-only
3 * Huawei HiNIC PCI Express Linux driver
4 * Copyright(c) 2017 Huawei Technologies Co., Ltd
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/errno.h>
10 #include <linux/pci.h>
11 #include <linux/device.h>
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
14 #include <linux/spinlock.h>
15 #include <linux/sizes.h>
16 #include <linux/atomic.h>
17 #include <linux/log2.h>
19 #include <linux/completion.h>
20 #include <linux/err.h>
21 #include <asm/byteorder.h>
22 #include <asm/barrier.h>
24 #include "hinic_common.h"
25 #include "hinic_hw_if.h"
26 #include "hinic_hw_eqs.h"
27 #include "hinic_hw_mgmt.h"
28 #include "hinic_hw_wqe.h"
29 #include "hinic_hw_wq.h"
30 #include "hinic_hw_cmdq.h"
31 #include "hinic_hw_io.h"
32 #include "hinic_hw_dev.h"
34 #define CMDQ_CEQE_TYPE_SHIFT 0
36 #define CMDQ_CEQE_TYPE_MASK 0x7
38 #define CMDQ_CEQE_GET(val, member) \
39 (((val) >> CMDQ_CEQE_##member##_SHIFT) \
40 & CMDQ_CEQE_##member##_MASK)
42 #define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
44 #define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
46 #define CMDQ_WQE_ERRCODE_GET(val, member) \
47 (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
48 & CMDQ_WQE_ERRCODE_##member##_MASK)
50 #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
52 #define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
54 #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
56 #define CMDQ_WQE_COMPLETED(ctrl_info) \
57 HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
59 #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
61 #define CMDQ_DB_OFF SZ_2K
63 #define CMDQ_WQEBB_SIZE 64
64 #define CMDQ_WQE_SIZE 64
65 #define CMDQ_DEPTH SZ_4K
67 #define CMDQ_WQ_PAGE_SIZE SZ_4K
69 #define WQE_LCMD_SIZE 64
70 #define WQE_SCMD_SIZE 64
72 #define COMPLETE_LEN 3
74 #define CMDQ_TIMEOUT 1000
76 #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
78 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
79 struct hinic_cmdqs, cmdq[0])
81 #define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
82 struct hinic_func_to_io, \
90 enum completion_format
{
101 BUFDESC_LCMD_LEN
= 2, /* 16 bytes - 2(8 byte unit) */
102 BUFDESC_SCMD_LEN
= 3, /* 24 bytes - 3(8 byte unit) */
106 CTRL_SECT_LEN
= 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
107 CTRL_DIRECT_SECT_LEN
= 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
110 enum cmdq_scmd_type
{
111 CMDQ_SET_ARM_CMD
= 2,
115 CMDQ_CMD_SYNC_DIRECT_RESP
= 0,
116 CMDQ_CMD_SYNC_SGE_RESP
= 1,
119 enum completion_request
{
125 * hinic_alloc_cmdq_buf - alloc buffer for sending command
127 * @cmdq_buf: the buffer returned in this struct
129 * Return 0 - Success, negative - Failure
131 int hinic_alloc_cmdq_buf(struct hinic_cmdqs
*cmdqs
,
132 struct hinic_cmdq_buf
*cmdq_buf
)
134 struct hinic_hwif
*hwif
= cmdqs
->hwif
;
135 struct pci_dev
*pdev
= hwif
->pdev
;
137 cmdq_buf
->buf
= dma_pool_alloc(cmdqs
->cmdq_buf_pool
, GFP_KERNEL
,
138 &cmdq_buf
->dma_addr
);
139 if (!cmdq_buf
->buf
) {
140 dev_err(&pdev
->dev
, "Failed to allocate cmd from the pool\n");
148 * hinic_free_cmdq_buf - free buffer
150 * @cmdq_buf: the buffer to free that is in this struct
152 void hinic_free_cmdq_buf(struct hinic_cmdqs
*cmdqs
,
153 struct hinic_cmdq_buf
*cmdq_buf
)
155 dma_pool_free(cmdqs
->cmdq_buf_pool
, cmdq_buf
->buf
, cmdq_buf
->dma_addr
);
158 static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len
)
160 unsigned int wqe_size
= 0;
163 case BUFDESC_LCMD_LEN
:
164 wqe_size
= WQE_LCMD_SIZE
;
166 case BUFDESC_SCMD_LEN
:
167 wqe_size
= WQE_SCMD_SIZE
;
174 static void cmdq_set_sge_completion(struct hinic_cmdq_completion
*completion
,
175 struct hinic_cmdq_buf
*buf_out
)
177 struct hinic_sge_resp
*sge_resp
= &completion
->sge_resp
;
179 hinic_set_sge(&sge_resp
->sge
, buf_out
->dma_addr
, buf_out
->size
);
182 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe
*wqe
, int wrapped
,
183 enum hinic_cmd_ack_type ack_type
,
184 enum hinic_mod_type mod
, u8 cmd
, u16 prod_idx
,
185 enum completion_format complete_format
,
186 enum data_format data_format
,
187 enum bufdesc_len buf_len
)
189 struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
;
190 struct hinic_cmdq_wqe_scmd
*wqe_scmd
;
191 enum ctrl_sect_len ctrl_len
;
192 struct hinic_ctrl
*ctrl
;
195 if (data_format
== DATA_SGE
) {
196 wqe_lcmd
= &wqe
->wqe_lcmd
;
198 wqe_lcmd
->status
.status_info
= 0;
199 ctrl
= &wqe_lcmd
->ctrl
;
200 ctrl_len
= CTRL_SECT_LEN
;
202 wqe_scmd
= &wqe
->direct_wqe
.wqe_scmd
;
204 wqe_scmd
->status
.status_info
= 0;
205 ctrl
= &wqe_scmd
->ctrl
;
206 ctrl_len
= CTRL_DIRECT_SECT_LEN
;
209 ctrl
->ctrl_info
= HINIC_CMDQ_CTRL_SET(prod_idx
, PI
) |
210 HINIC_CMDQ_CTRL_SET(cmd
, CMD
) |
211 HINIC_CMDQ_CTRL_SET(mod
, MOD
) |
212 HINIC_CMDQ_CTRL_SET(ack_type
, ACK_TYPE
);
214 CMDQ_WQE_HEADER(wqe
)->header_info
=
215 HINIC_CMDQ_WQE_HEADER_SET(buf_len
, BUFDESC_LEN
) |
216 HINIC_CMDQ_WQE_HEADER_SET(complete_format
, COMPLETE_FMT
) |
217 HINIC_CMDQ_WQE_HEADER_SET(data_format
, DATA_FMT
) |
218 HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET
, COMPLETE_REQ
) |
219 HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN
, COMPLETE_SECT_LEN
) |
220 HINIC_CMDQ_WQE_HEADER_SET(ctrl_len
, CTRL_LEN
) |
221 HINIC_CMDQ_WQE_HEADER_SET(wrapped
, TOGGLED_WRAPPED
);
223 saved_data
= CMDQ_WQE_HEADER(wqe
)->saved_data
;
224 saved_data
= HINIC_SAVED_DATA_CLEAR(saved_data
, ARM
);
226 if ((cmd
== CMDQ_SET_ARM_CMD
) && (mod
== HINIC_MOD_COMM
))
227 CMDQ_WQE_HEADER(wqe
)->saved_data
|=
228 HINIC_SAVED_DATA_SET(1, ARM
);
230 CMDQ_WQE_HEADER(wqe
)->saved_data
= saved_data
;
233 static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
,
234 struct hinic_cmdq_buf
*buf_in
)
236 hinic_set_sge(&wqe_lcmd
->buf_desc
.sge
, buf_in
->dma_addr
, buf_in
->size
);
239 static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe
*wqe
,
240 void *buf_in
, u32 in_size
)
242 struct hinic_cmdq_wqe_scmd
*wqe_scmd
= &wqe
->wqe_scmd
;
244 wqe_scmd
->buf_desc
.buf_len
= in_size
;
245 memcpy(wqe_scmd
->buf_desc
.data
, buf_in
, in_size
);
248 static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe
*wqe
,
249 enum cmdq_cmd_type cmd_type
,
250 struct hinic_cmdq_buf
*buf_in
,
251 struct hinic_cmdq_buf
*buf_out
, int wrapped
,
252 enum hinic_cmd_ack_type ack_type
,
253 enum hinic_mod_type mod
, u8 cmd
, u16 prod_idx
)
255 struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
= &wqe
->wqe_lcmd
;
256 enum completion_format complete_format
;
259 case CMDQ_CMD_SYNC_SGE_RESP
:
260 complete_format
= COMPLETE_SGE
;
261 cmdq_set_sge_completion(&wqe_lcmd
->completion
, buf_out
);
263 case CMDQ_CMD_SYNC_DIRECT_RESP
:
264 complete_format
= COMPLETE_DIRECT
;
265 wqe_lcmd
->completion
.direct_resp
= 0;
269 cmdq_prepare_wqe_ctrl(wqe
, wrapped
, ack_type
, mod
, cmd
,
270 prod_idx
, complete_format
, DATA_SGE
,
273 cmdq_set_lcmd_bufdesc(wqe_lcmd
, buf_in
);
276 static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe
*wqe
,
277 enum cmdq_cmd_type cmd_type
,
278 void *buf_in
, u16 in_size
,
279 struct hinic_cmdq_buf
*buf_out
, int wrapped
,
280 enum hinic_cmd_ack_type ack_type
,
281 enum hinic_mod_type mod
, u8 cmd
, u16 prod_idx
)
283 struct hinic_cmdq_direct_wqe
*direct_wqe
= &wqe
->direct_wqe
;
284 enum completion_format complete_format
;
285 struct hinic_cmdq_wqe_scmd
*wqe_scmd
;
287 wqe_scmd
= &direct_wqe
->wqe_scmd
;
290 case CMDQ_CMD_SYNC_SGE_RESP
:
291 complete_format
= COMPLETE_SGE
;
292 cmdq_set_sge_completion(&wqe_scmd
->completion
, buf_out
);
294 case CMDQ_CMD_SYNC_DIRECT_RESP
:
295 complete_format
= COMPLETE_DIRECT
;
296 wqe_scmd
->completion
.direct_resp
= 0;
300 cmdq_prepare_wqe_ctrl(wqe
, wrapped
, ack_type
, mod
, cmd
, prod_idx
,
301 complete_format
, DATA_DIRECT
, BUFDESC_SCMD_LEN
);
303 cmdq_set_direct_wqe_data(direct_wqe
, buf_in
, in_size
);
306 static void cmdq_wqe_fill(void *dst
, void *src
)
308 memcpy(dst
+ FIRST_DATA_TO_WRITE_LAST
, src
+ FIRST_DATA_TO_WRITE_LAST
,
309 CMDQ_WQE_SIZE
- FIRST_DATA_TO_WRITE_LAST
);
311 wmb(); /* The first 8 bytes should be written last */
313 *(u64
*)dst
= *(u64
*)src
;
316 static void cmdq_fill_db(u32
*db_info
,
317 enum hinic_cmdq_type cmdq_type
, u16 prod_idx
)
319 *db_info
= HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx
), HI_PROD_IDX
) |
320 HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH
, PATH
) |
321 HINIC_CMDQ_DB_INFO_SET(cmdq_type
, CMDQ_TYPE
) |
322 HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE
, DB_TYPE
);
325 static void cmdq_set_db(struct hinic_cmdq
*cmdq
,
326 enum hinic_cmdq_type cmdq_type
, u16 prod_idx
)
330 cmdq_fill_db(&db_info
, cmdq_type
, prod_idx
);
332 /* The data that is written to HW should be in Big Endian Format */
333 db_info
= cpu_to_be32(db_info
);
335 wmb(); /* write all before the doorbell */
337 writel(db_info
, CMDQ_DB_ADDR(cmdq
->db_base
, prod_idx
));
340 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq
*cmdq
,
341 enum hinic_mod_type mod
, u8 cmd
,
342 struct hinic_cmdq_buf
*buf_in
,
345 struct hinic_cmdq_wqe
*curr_cmdq_wqe
, cmdq_wqe
;
346 u16 curr_prod_idx
, next_prod_idx
;
347 int errcode
, wrapped
, num_wqebbs
;
348 struct hinic_wq
*wq
= cmdq
->wq
;
349 struct hinic_hw_wqe
*hw_wqe
;
350 struct completion done
;
352 /* Keep doorbell index correct. bh - for tasklet(ceq). */
353 spin_lock_bh(&cmdq
->cmdq_lock
);
355 /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
356 hw_wqe
= hinic_get_wqe(wq
, WQE_LCMD_SIZE
, &curr_prod_idx
);
357 if (IS_ERR(hw_wqe
)) {
358 spin_unlock_bh(&cmdq
->cmdq_lock
);
362 curr_cmdq_wqe
= &hw_wqe
->cmdq_wqe
;
364 wrapped
= cmdq
->wrapped
;
366 num_wqebbs
= ALIGN(WQE_LCMD_SIZE
, wq
->wqebb_size
) / wq
->wqebb_size
;
367 next_prod_idx
= curr_prod_idx
+ num_wqebbs
;
368 if (next_prod_idx
>= wq
->q_depth
) {
369 cmdq
->wrapped
= !cmdq
->wrapped
;
370 next_prod_idx
-= wq
->q_depth
;
373 cmdq
->errcode
[curr_prod_idx
] = &errcode
;
375 init_completion(&done
);
376 cmdq
->done
[curr_prod_idx
] = &done
;
378 cmdq_set_lcmd_wqe(&cmdq_wqe
, CMDQ_CMD_SYNC_DIRECT_RESP
, buf_in
, NULL
,
379 wrapped
, HINIC_CMD_ACK_TYPE_CMDQ
, mod
, cmd
,
382 /* The data that is written to HW should be in Big Endian Format */
383 hinic_cpu_to_be32(&cmdq_wqe
, WQE_LCMD_SIZE
);
385 /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
386 cmdq_wqe_fill(curr_cmdq_wqe
, &cmdq_wqe
);
388 cmdq_set_db(cmdq
, HINIC_CMDQ_SYNC
, next_prod_idx
);
390 spin_unlock_bh(&cmdq
->cmdq_lock
);
392 if (!wait_for_completion_timeout(&done
, CMDQ_TIMEOUT
)) {
393 spin_lock_bh(&cmdq
->cmdq_lock
);
395 if (cmdq
->errcode
[curr_prod_idx
] == &errcode
)
396 cmdq
->errcode
[curr_prod_idx
] = NULL
;
398 if (cmdq
->done
[curr_prod_idx
] == &done
)
399 cmdq
->done
[curr_prod_idx
] = NULL
;
401 spin_unlock_bh(&cmdq
->cmdq_lock
);
406 smp_rmb(); /* read error code after completion */
409 struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
= &curr_cmdq_wqe
->wqe_lcmd
;
411 *resp
= cpu_to_be64(wqe_lcmd
->completion
.direct_resp
);
420 static int cmdq_set_arm_bit(struct hinic_cmdq
*cmdq
, void *buf_in
,
423 struct hinic_cmdq_wqe
*curr_cmdq_wqe
, cmdq_wqe
;
424 u16 curr_prod_idx
, next_prod_idx
;
425 struct hinic_wq
*wq
= cmdq
->wq
;
426 struct hinic_hw_wqe
*hw_wqe
;
427 int wrapped
, num_wqebbs
;
429 /* Keep doorbell index correct */
430 spin_lock(&cmdq
->cmdq_lock
);
432 /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
433 hw_wqe
= hinic_get_wqe(wq
, WQE_SCMD_SIZE
, &curr_prod_idx
);
434 if (IS_ERR(hw_wqe
)) {
435 spin_unlock(&cmdq
->cmdq_lock
);
439 curr_cmdq_wqe
= &hw_wqe
->cmdq_wqe
;
441 wrapped
= cmdq
->wrapped
;
443 num_wqebbs
= ALIGN(WQE_SCMD_SIZE
, wq
->wqebb_size
) / wq
->wqebb_size
;
444 next_prod_idx
= curr_prod_idx
+ num_wqebbs
;
445 if (next_prod_idx
>= wq
->q_depth
) {
446 cmdq
->wrapped
= !cmdq
->wrapped
;
447 next_prod_idx
-= wq
->q_depth
;
450 cmdq_set_direct_wqe(&cmdq_wqe
, CMDQ_CMD_SYNC_DIRECT_RESP
, buf_in
,
451 in_size
, NULL
, wrapped
, HINIC_CMD_ACK_TYPE_CMDQ
,
452 HINIC_MOD_COMM
, CMDQ_SET_ARM_CMD
, curr_prod_idx
);
454 /* The data that is written to HW should be in Big Endian Format */
455 hinic_cpu_to_be32(&cmdq_wqe
, WQE_SCMD_SIZE
);
457 /* cmdq wqe is not shadow, therefore wqe will be written to wq */
458 cmdq_wqe_fill(curr_cmdq_wqe
, &cmdq_wqe
);
460 cmdq_set_db(cmdq
, HINIC_CMDQ_SYNC
, next_prod_idx
);
462 spin_unlock(&cmdq
->cmdq_lock
);
466 static int cmdq_params_valid(struct hinic_cmdq_buf
*buf_in
)
468 if (buf_in
->size
> HINIC_CMDQ_MAX_DATA_SIZE
)
475 * hinic_cmdq_direct_resp - send command with direct data as resp
477 * @mod: module on the card that will handle the command
479 * @buf_in: the buffer for the command
480 * @resp: the response to return
482 * Return 0 - Success, negative - Failure
484 int hinic_cmdq_direct_resp(struct hinic_cmdqs
*cmdqs
,
485 enum hinic_mod_type mod
, u8 cmd
,
486 struct hinic_cmdq_buf
*buf_in
, u64
*resp
)
488 struct hinic_hwif
*hwif
= cmdqs
->hwif
;
489 struct pci_dev
*pdev
= hwif
->pdev
;
492 err
= cmdq_params_valid(buf_in
);
494 dev_err(&pdev
->dev
, "Invalid CMDQ parameters\n");
498 return cmdq_sync_cmd_direct_resp(&cmdqs
->cmdq
[HINIC_CMDQ_SYNC
],
499 mod
, cmd
, buf_in
, resp
);
503 * hinic_set_arm_bit - set arm bit for enable interrupt again
505 * @q_type: type of queue to set the arm bit for
506 * @q_id: the queue number
508 * Return 0 - Success, negative - Failure
510 int hinic_set_arm_bit(struct hinic_cmdqs
*cmdqs
,
511 enum hinic_set_arm_qtype q_type
, u32 q_id
)
513 struct hinic_cmdq
*cmdq
= &cmdqs
->cmdq
[HINIC_CMDQ_SYNC
];
514 struct hinic_hwif
*hwif
= cmdqs
->hwif
;
515 struct pci_dev
*pdev
= hwif
->pdev
;
516 struct hinic_cmdq_arm_bit arm_bit
;
519 arm_bit
.q_type
= q_type
;
522 err
= cmdq_set_arm_bit(cmdq
, &arm_bit
, sizeof(arm_bit
));
524 dev_err(&pdev
->dev
, "Failed to set arm for qid %d\n", q_id
);
531 static void clear_wqe_complete_bit(struct hinic_cmdq
*cmdq
,
532 struct hinic_cmdq_wqe
*wqe
)
534 u32 header_info
= be32_to_cpu(CMDQ_WQE_HEADER(wqe
)->header_info
);
535 unsigned int bufdesc_len
, wqe_size
;
536 struct hinic_ctrl
*ctrl
;
538 bufdesc_len
= HINIC_CMDQ_WQE_HEADER_GET(header_info
, BUFDESC_LEN
);
539 wqe_size
= cmdq_wqe_size_from_bdlen(bufdesc_len
);
540 if (wqe_size
== WQE_LCMD_SIZE
) {
541 struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
= &wqe
->wqe_lcmd
;
543 ctrl
= &wqe_lcmd
->ctrl
;
545 struct hinic_cmdq_direct_wqe
*direct_wqe
= &wqe
->direct_wqe
;
546 struct hinic_cmdq_wqe_scmd
*wqe_scmd
;
548 wqe_scmd
= &direct_wqe
->wqe_scmd
;
549 ctrl
= &wqe_scmd
->ctrl
;
552 /* clear HW busy bit */
555 wmb(); /* verify wqe is clear */
559 * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
560 * @cmdq: the cmdq of the arm command
561 * @wqe: the wqe of the arm command
563 * Return 0 - Success, negative - Failure
565 static int cmdq_arm_ceq_handler(struct hinic_cmdq
*cmdq
,
566 struct hinic_cmdq_wqe
*wqe
)
568 struct hinic_cmdq_direct_wqe
*direct_wqe
= &wqe
->direct_wqe
;
569 struct hinic_cmdq_wqe_scmd
*wqe_scmd
;
570 struct hinic_ctrl
*ctrl
;
573 wqe_scmd
= &direct_wqe
->wqe_scmd
;
574 ctrl
= &wqe_scmd
->ctrl
;
575 ctrl_info
= be32_to_cpu(ctrl
->ctrl_info
);
577 /* HW should toggle the HW BUSY BIT */
578 if (!CMDQ_WQE_COMPLETED(ctrl_info
))
581 clear_wqe_complete_bit(cmdq
, wqe
);
583 hinic_put_wqe(cmdq
->wq
, WQE_SCMD_SIZE
);
587 static void cmdq_update_errcode(struct hinic_cmdq
*cmdq
, u16 prod_idx
,
590 if (cmdq
->errcode
[prod_idx
])
591 *cmdq
->errcode
[prod_idx
] = errcode
;
595 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
596 * @cmdq: the cmdq of the command
597 * @cons_idx: the consumer index to update the error code for
598 * @errcode: the error code
600 static void cmdq_sync_cmd_handler(struct hinic_cmdq
*cmdq
, u16 cons_idx
,
603 u16 prod_idx
= cons_idx
;
605 spin_lock(&cmdq
->cmdq_lock
);
606 cmdq_update_errcode(cmdq
, prod_idx
, errcode
);
608 wmb(); /* write all before update for the command request */
610 if (cmdq
->done
[prod_idx
])
611 complete(cmdq
->done
[prod_idx
]);
612 spin_unlock(&cmdq
->cmdq_lock
);
615 static int cmdq_cmd_ceq_handler(struct hinic_cmdq
*cmdq
, u16 ci
,
616 struct hinic_cmdq_wqe
*cmdq_wqe
)
618 struct hinic_cmdq_wqe_lcmd
*wqe_lcmd
= &cmdq_wqe
->wqe_lcmd
;
619 struct hinic_status
*status
= &wqe_lcmd
->status
;
620 struct hinic_ctrl
*ctrl
= &wqe_lcmd
->ctrl
;
623 if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl
->ctrl_info
)))
626 errcode
= CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status
->status_info
), VAL
);
628 cmdq_sync_cmd_handler(cmdq
, ci
, errcode
);
630 clear_wqe_complete_bit(cmdq
, cmdq_wqe
);
631 hinic_put_wqe(cmdq
->wq
, WQE_LCMD_SIZE
);
636 * cmdq_ceq_handler - cmdq completion event handler
637 * @handle: private data for the handler(cmdqs)
638 * @ceqe_data: ceq element data
640 static void cmdq_ceq_handler(void *handle
, u32 ceqe_data
)
642 enum hinic_cmdq_type cmdq_type
= CMDQ_CEQE_GET(ceqe_data
, TYPE
);
643 struct hinic_cmdqs
*cmdqs
= (struct hinic_cmdqs
*)handle
;
644 struct hinic_cmdq
*cmdq
= &cmdqs
->cmdq
[cmdq_type
];
645 struct hinic_cmdq_header
*header
;
646 struct hinic_hw_wqe
*hw_wqe
;
647 int err
, set_arm
= 0;
651 /* Read the smallest wqe size for getting wqe size */
652 while ((hw_wqe
= hinic_read_wqe(cmdq
->wq
, WQE_SCMD_SIZE
, &ci
))) {
656 header
= CMDQ_WQE_HEADER(&hw_wqe
->cmdq_wqe
);
657 saved_data
= be32_to_cpu(header
->saved_data
);
659 if (HINIC_SAVED_DATA_GET(saved_data
, ARM
)) {
660 /* arm_bit was set until here */
663 if (cmdq_arm_ceq_handler(cmdq
, &hw_wqe
->cmdq_wqe
))
668 hw_wqe
= hinic_read_wqe(cmdq
->wq
, WQE_LCMD_SIZE
, &ci
);
672 if (cmdq_cmd_ceq_handler(cmdq
, ci
, &hw_wqe
->cmdq_wqe
))
678 struct hinic_hwif
*hwif
= cmdqs
->hwif
;
679 struct pci_dev
*pdev
= hwif
->pdev
;
681 err
= hinic_set_arm_bit(cmdqs
, HINIC_SET_ARM_CMDQ
, cmdq_type
);
683 dev_err(&pdev
->dev
, "Failed to set arm for CMDQ\n");
688 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
689 * @cmdq_ctxt: cmdq ctxt to initialize
691 * @cmdq_pages: the memory of the queue
693 static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt
*cmdq_ctxt
,
694 struct hinic_cmdq
*cmdq
,
695 struct hinic_cmdq_pages
*cmdq_pages
)
697 struct hinic_cmdq_ctxt_info
*ctxt_info
= &cmdq_ctxt
->ctxt_info
;
698 u64 wq_first_page_paddr
, cmdq_first_block_paddr
, pfn
;
699 struct hinic_cmdqs
*cmdqs
= cmdq_to_cmdqs(cmdq
);
700 struct hinic_wq
*wq
= cmdq
->wq
;
702 /* The data in the HW is in Big Endian Format */
703 wq_first_page_paddr
= be64_to_cpu(*wq
->block_vaddr
);
705 pfn
= CMDQ_PFN(wq_first_page_paddr
, wq
->wq_page_size
);
707 ctxt_info
->curr_wqe_page_pfn
=
708 HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn
, CURR_WQE_PAGE_PFN
) |
709 HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ
, EQ_ID
) |
710 HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM
) |
711 HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN
) |
712 HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq
->wrapped
, WRAPPED
);
714 /* block PFN - Read Modify Write */
715 cmdq_first_block_paddr
= cmdq_pages
->page_paddr
;
717 pfn
= CMDQ_PFN(cmdq_first_block_paddr
, wq
->wq_page_size
);
719 ctxt_info
->wq_block_pfn
=
720 HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn
, WQ_BLOCK_PFN
) |
721 HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq
->cons_idx
), CI
);
723 cmdq_ctxt
->func_idx
= HINIC_HWIF_FUNC_IDX(cmdqs
->hwif
);
724 cmdq_ctxt
->cmdq_type
= cmdq
->cmdq_type
;
728 * init_cmdq - initialize cmdq
730 * @wq: the wq attaced to the cmdq
731 * @q_type: the cmdq type of the cmdq
732 * @db_area: doorbell area for the cmdq
734 * Return 0 - Success, negative - Failure
736 static int init_cmdq(struct hinic_cmdq
*cmdq
, struct hinic_wq
*wq
,
737 enum hinic_cmdq_type q_type
, void __iomem
*db_area
)
742 cmdq
->cmdq_type
= q_type
;
745 spin_lock_init(&cmdq
->cmdq_lock
);
747 cmdq
->done
= vzalloc(array_size(sizeof(*cmdq
->done
), wq
->q_depth
));
751 cmdq
->errcode
= vzalloc(array_size(sizeof(*cmdq
->errcode
),
753 if (!cmdq
->errcode
) {
758 cmdq
->db_base
= db_area
+ CMDQ_DB_OFF
;
767 * free_cmdq - Free cmdq
768 * @cmdq: the cmdq to free
770 static void free_cmdq(struct hinic_cmdq
*cmdq
)
772 vfree(cmdq
->errcode
);
777 * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
778 * @hwdev: the NIC HW device
779 * @cmdqs: cmdqs to write the ctxts for
780 * &db_area: db_area for all the cmdqs
782 * Return 0 - Success, negative - Failure
784 static int init_cmdqs_ctxt(struct hinic_hwdev
*hwdev
,
785 struct hinic_cmdqs
*cmdqs
, void __iomem
**db_area
)
787 struct hinic_hwif
*hwif
= hwdev
->hwif
;
788 enum hinic_cmdq_type type
, cmdq_type
;
789 struct hinic_cmdq_ctxt
*cmdq_ctxts
;
790 struct pci_dev
*pdev
= hwif
->pdev
;
791 struct hinic_pfhwdev
*pfhwdev
;
792 size_t cmdq_ctxts_size
;
795 if (!HINIC_IS_PF(hwif
) && !HINIC_IS_PPF(hwif
)) {
796 dev_err(&pdev
->dev
, "Unsupported PCI function type\n");
800 cmdq_ctxts_size
= HINIC_MAX_CMDQ_TYPES
* sizeof(*cmdq_ctxts
);
801 cmdq_ctxts
= devm_kzalloc(&pdev
->dev
, cmdq_ctxts_size
, GFP_KERNEL
);
805 pfhwdev
= container_of(hwdev
, struct hinic_pfhwdev
, hwdev
);
807 cmdq_type
= HINIC_CMDQ_SYNC
;
808 for (; cmdq_type
< HINIC_MAX_CMDQ_TYPES
; cmdq_type
++) {
809 err
= init_cmdq(&cmdqs
->cmdq
[cmdq_type
],
810 &cmdqs
->saved_wqs
[cmdq_type
], cmdq_type
,
813 dev_err(&pdev
->dev
, "Failed to initialize cmdq\n");
817 cmdq_init_queue_ctxt(&cmdq_ctxts
[cmdq_type
],
818 &cmdqs
->cmdq
[cmdq_type
],
822 /* Write the CMDQ ctxts */
823 cmdq_type
= HINIC_CMDQ_SYNC
;
824 for (; cmdq_type
< HINIC_MAX_CMDQ_TYPES
; cmdq_type
++) {
825 err
= hinic_msg_to_mgmt(&pfhwdev
->pf_to_mgmt
, HINIC_MOD_COMM
,
826 HINIC_COMM_CMD_CMDQ_CTXT_SET
,
827 &cmdq_ctxts
[cmdq_type
],
828 sizeof(cmdq_ctxts
[cmdq_type
]),
829 NULL
, NULL
, HINIC_MGMT_MSG_SYNC
);
831 dev_err(&pdev
->dev
, "Failed to set CMDQ CTXT type = %d\n",
833 goto err_write_cmdq_ctxt
;
837 devm_kfree(&pdev
->dev
, cmdq_ctxts
);
841 cmdq_type
= HINIC_MAX_CMDQ_TYPES
;
844 for (type
= HINIC_CMDQ_SYNC
; type
< cmdq_type
; type
++)
845 free_cmdq(&cmdqs
->cmdq
[type
]);
847 devm_kfree(&pdev
->dev
, cmdq_ctxts
);
852 * hinic_init_cmdqs - init all cmdqs
853 * @cmdqs: cmdqs to init
854 * @hwif: HW interface for accessing cmdqs
855 * @db_area: doorbell areas for all the cmdqs
857 * Return 0 - Success, negative - Failure
859 int hinic_init_cmdqs(struct hinic_cmdqs
*cmdqs
, struct hinic_hwif
*hwif
,
860 void __iomem
**db_area
)
862 struct hinic_func_to_io
*func_to_io
= cmdqs_to_func_to_io(cmdqs
);
863 struct pci_dev
*pdev
= hwif
->pdev
;
864 struct hinic_hwdev
*hwdev
;
865 size_t saved_wqs_size
;
870 cmdqs
->cmdq_buf_pool
= dma_pool_create("hinic_cmdq", &pdev
->dev
,
872 HINIC_CMDQ_BUF_SIZE
, 0);
873 if (!cmdqs
->cmdq_buf_pool
)
876 saved_wqs_size
= HINIC_MAX_CMDQ_TYPES
* sizeof(struct hinic_wq
);
877 cmdqs
->saved_wqs
= devm_kzalloc(&pdev
->dev
, saved_wqs_size
, GFP_KERNEL
);
878 if (!cmdqs
->saved_wqs
) {
883 max_wqe_size
= WQE_LCMD_SIZE
;
884 err
= hinic_wqs_cmdq_alloc(&cmdqs
->cmdq_pages
, cmdqs
->saved_wqs
, hwif
,
885 HINIC_MAX_CMDQ_TYPES
, CMDQ_WQEBB_SIZE
,
886 CMDQ_WQ_PAGE_SIZE
, CMDQ_DEPTH
, max_wqe_size
);
888 dev_err(&pdev
->dev
, "Failed to allocate CMDQ wqs\n");
892 hwdev
= container_of(func_to_io
, struct hinic_hwdev
, func_to_io
);
893 err
= init_cmdqs_ctxt(hwdev
, cmdqs
, db_area
);
895 dev_err(&pdev
->dev
, "Failed to write cmdq ctxt\n");
899 hinic_ceq_register_cb(&func_to_io
->ceqs
, HINIC_CEQ_CMDQ
, cmdqs
,
904 hinic_wqs_cmdq_free(&cmdqs
->cmdq_pages
, cmdqs
->saved_wqs
,
905 HINIC_MAX_CMDQ_TYPES
);
908 devm_kfree(&pdev
->dev
, cmdqs
->saved_wqs
);
911 dma_pool_destroy(cmdqs
->cmdq_buf_pool
);
916 * hinic_free_cmdqs - free all cmdqs
917 * @cmdqs: cmdqs to free
919 void hinic_free_cmdqs(struct hinic_cmdqs
*cmdqs
)
921 struct hinic_func_to_io
*func_to_io
= cmdqs_to_func_to_io(cmdqs
);
922 struct hinic_hwif
*hwif
= cmdqs
->hwif
;
923 struct pci_dev
*pdev
= hwif
->pdev
;
924 enum hinic_cmdq_type cmdq_type
;
926 hinic_ceq_unregister_cb(&func_to_io
->ceqs
, HINIC_CEQ_CMDQ
);
928 cmdq_type
= HINIC_CMDQ_SYNC
;
929 for (; cmdq_type
< HINIC_MAX_CMDQ_TYPES
; cmdq_type
++)
930 free_cmdq(&cmdqs
->cmdq
[cmdq_type
]);
932 hinic_wqs_cmdq_free(&cmdqs
->cmdq_pages
, cmdqs
->saved_wqs
,
933 HINIC_MAX_CMDQ_TYPES
);
935 devm_kfree(&pdev
->dev
, cmdqs
->saved_wqs
);
937 dma_pool_destroy(cmdqs
->cmdq_buf_pool
);