// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/byteorder.h>

#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"

#define WQS_BLOCKS_PER_PAGE             4

#define WQ_BLOCK_SIZE                   4096
#define WQS_PAGE_SIZE                   (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)

#define WQS_MAX_NUM_BLOCKS              128
#define WQS_FREE_BLOCKS_SIZE(wqs)       (WQS_MAX_NUM_BLOCKS * \
                                         sizeof((wqs)->free_blocks[0]))

#define WQ_SIZE(wq)                     ((wq)->q_depth * (wq)->wqebb_size)

#define WQ_PAGE_ADDR_SIZE               sizeof(u64)
#define WQ_MAX_PAGES                    (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)

#define CMDQ_BLOCK_SIZE                 512
#define CMDQ_PAGE_SIZE                  4096

#define CMDQ_WQ_MAX_PAGES               (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)

#define WQ_BASE_VADDR(wqs, wq)          \
                        ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_PADDR(wqs, wq)          \
                        ((wqs)->page_paddr[(wq)->page_idx] \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_ADDR(wqs, wq)           \
                        ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
                        ((void *)((cmdq_pages)->page_vaddr) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
                        ((cmdq_pages)->page_paddr \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_ADDR(cmdq_pages, wq)  \
                        ((void *)((cmdq_pages)->shadow_page_vaddr) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define WQ_PAGE_ADDR(wq, idx)           \
                        ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])

#define MASKED_WQE_IDX(wq, idx)         ((idx) & (wq)->mask)

#define WQE_IN_RANGE(wqe, start, end)   \
                (((unsigned long)(wqe) >= (unsigned long)(start)) && \
                 ((unsigned long)(wqe) < (unsigned long)(end)))

#define WQE_SHADOW_PAGE(wq, wqe)        \
                (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
                        / (wq)->max_wqe_size)

static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
{
        return (((idx) & ((wq)->num_wqebbs_per_page - 1))
                << (wq)->wqebb_size_shift);
}

static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
{
        return (((idx) >> ((wq)->wqebbs_per_page_shift))
                & ((wq)->num_q_pages - 1));
}
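
/* Worked example of the index math above (editorial note; the values are
 * illustrative, not taken from this driver): with wqebb_size = 64
 * (wqebb_size_shift = 6), num_wqebbs_per_page = 64 (wqebbs_per_page_shift = 6)
 * and num_q_pages = 4, WQE index 70 lands in page
 * WQE_PAGE_NUM = (70 >> 6) & 3 = 1 at byte offset
 * WQE_PAGE_OFF = (70 & 63) << 6 = 384 within that page.
 */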

/**
 * queue_alloc_page - allocate page for Queue
 * @hwif: HW interface for allocating DMA
 * @vaddr: virtual address will be returned in this address
 * @paddr: physical address will be returned in this address
 * @shadow_vaddr: VM area will be returned here for holding WQ page addresses
 * @page_sz: page size of each WQ page
 *
 * Return 0 - Success, negative - Failure
 **/
static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
                            void ***shadow_vaddr, size_t page_sz)
{
        struct pci_dev *pdev = hwif->pdev;
        dma_addr_t dma_addr;

        *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
                                    GFP_KERNEL);
        if (!*vaddr) {
                dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
                return -ENOMEM;
        }

        *paddr = (u64)dma_addr;

        /* use vzalloc for big mem */
        *shadow_vaddr = vzalloc(page_sz);
        if (!*shadow_vaddr)
                goto err_shadow_vaddr;

        return 0;

err_shadow_vaddr:
        dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
        return -ENOMEM;
}
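
/* Editorial note: the DMA-coherent page allocated above is the block table
 * the hardware reads (it ends up holding big-endian physical addresses of the
 * WQ data pages, see alloc_wq_pages()), while the vzalloc()'d shadow area is
 * a CPU-side mirror holding the corresponding kernel virtual addresses; it is
 * vzalloc()'d because it may be large and needs no physical contiguity.
 */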

/**
 * wqs_allocate_page - allocate page for WQ set
 * @wqs: Work Queue Set
 * @page_idx: the page index of the page will be allocated
 *
 * Return 0 - Success, negative - Failure
 **/
static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
{
        return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
                                &wqs->page_paddr[page_idx],
                                &wqs->shadow_page_vaddr[page_idx],
                                WQS_PAGE_SIZE);
}

/**
 * wqs_free_page - free page of WQ set
 * @wqs: Work Queue Set
 * @page_idx: the page index of the page will be freed
 **/
static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;

        dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
                          wqs->page_vaddr[page_idx],
                          (dma_addr_t)wqs->page_paddr[page_idx]);
        vfree(wqs->shadow_page_vaddr[page_idx]);
}

/**
 * cmdq_allocate_page - allocate page for cmdq
 * @cmdq_pages: the pages of the cmdq queue struct to hold the page
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
{
        return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
                                &cmdq_pages->page_paddr,
                                &cmdq_pages->shadow_page_vaddr,
                                CMDQ_PAGE_SIZE);
}

/**
 * cmdq_free_page - free page from cmdq
 * @cmdq_pages: the pages of the cmdq queue struct that hold the page
 **/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
        struct hinic_hwif *hwif = cmdq_pages->hwif;
        struct pci_dev *pdev = hwif->pdev;

        dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
                          cmdq_pages->page_vaddr,
                          (dma_addr_t)cmdq_pages->page_paddr);
        vfree(cmdq_pages->shadow_page_vaddr);
}

static int alloc_page_arrays(struct hinic_wqs *wqs)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        size_t size;

        size = wqs->num_pages * sizeof(*wqs->page_paddr);
        wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wqs->page_paddr)
                return -ENOMEM;

        size = wqs->num_pages * sizeof(*wqs->page_vaddr);
        wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wqs->page_vaddr)
                goto err_page_vaddr;

        size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
        wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wqs->shadow_page_vaddr)
                goto err_page_shadow_vaddr;

        return 0;

err_page_shadow_vaddr:
        devm_kfree(&pdev->dev, wqs->page_vaddr);

err_page_vaddr:
        devm_kfree(&pdev->dev, wqs->page_paddr);
        return -ENOMEM;
}

static void free_page_arrays(struct hinic_wqs *wqs)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;

        devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
        devm_kfree(&pdev->dev, wqs->page_vaddr);
        devm_kfree(&pdev->dev, wqs->page_paddr);
}

static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
                          int *block_idx)
{
        int pos;

        down(&wqs->alloc_blocks_lock);

        wqs->num_free_blks--;

        if (wqs->num_free_blks < 0) {
                wqs->num_free_blks++;
                up(&wqs->alloc_blocks_lock);
                return -ENOMEM;
        }

        pos = wqs->alloc_blk_pos++;
        pos &= WQS_MAX_NUM_BLOCKS - 1;

        *page_idx = wqs->free_blocks[pos].page_idx;
        *block_idx = wqs->free_blocks[pos].block_idx;

        wqs->free_blocks[pos].page_idx = -1;
        wqs->free_blocks[pos].block_idx = -1;

        up(&wqs->alloc_blocks_lock);

        return 0;
}

static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
                             int block_idx)
{
        int pos;

        down(&wqs->alloc_blocks_lock);

        pos = wqs->return_blk_pos++;
        pos &= WQS_MAX_NUM_BLOCKS - 1;

        wqs->free_blocks[pos].page_idx = page_idx;
        wqs->free_blocks[pos].block_idx = block_idx;

        wqs->num_free_blks++;

        up(&wqs->alloc_blocks_lock);
}

static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
{
        int page_idx, blk_idx, pos = 0;

        for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
                for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
                        wqs->free_blocks[pos].page_idx = page_idx;
                        wqs->free_blocks[pos].block_idx = blk_idx;
                        pos++;
                }
        }

        wqs->alloc_blk_pos = 0;
        wqs->return_blk_pos = pos;
        wqs->num_free_blks = pos;

        sema_init(&wqs->alloc_blocks_lock, 1);
}
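
/* Editorial note: free_blocks[] acts as a circular free list protected by
 * alloc_blocks_lock. wqs_next_block() hands out entries at alloc_blk_pos and
 * wqs_return_block() refills entries at return_blk_pos; both positions wrap
 * at WQS_MAX_NUM_BLOCKS and num_free_blks counts the valid entries. For
 * example (illustrative), with two WQS pages the array is seeded with
 * 2 * WQS_BLOCKS_PER_PAGE = 8 free blocks and return_blk_pos starts at 8.
 */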

/**
 * hinic_wqs_alloc - allocate Work Queues set
 * @wqs: Work Queue Set
 * @max_wqs: maximum wqs to allocate
 * @hwif: HW interface for use for the allocation
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
                    struct hinic_hwif *hwif)
{
        struct pci_dev *pdev = hwif->pdev;
        int err, i, page_idx;

        max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
        if (max_wqs > WQS_MAX_NUM_BLOCKS) {
                dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
                return -EINVAL;
        }

        wqs->hwif = hwif;
        wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;

        if (alloc_page_arrays(wqs)) {
                dev_err(&pdev->dev,
                        "Failed to allocate mem for page addresses\n");
                return -ENOMEM;
        }

        for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
                err = wqs_allocate_page(wqs, page_idx);
                if (err) {
                        dev_err(&pdev->dev, "Failed wq page allocation\n");
                        goto err_wq_allocate_page;
                }
        }

        wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
                                        GFP_KERNEL);
        if (!wqs->free_blocks) {
                err = -ENOMEM;
                goto err_alloc_blocks;
        }

        init_wqs_blocks_arr(wqs);
        return 0;

err_alloc_blocks:
err_wq_allocate_page:
        for (i = 0; i < page_idx; i++)
                wqs_free_page(wqs, i);

        free_page_arrays(wqs);
        return err;
}

/**
 * hinic_wqs_free - free Work Queues set
 * @wqs: Work Queue Set
 **/
void hinic_wqs_free(struct hinic_wqs *wqs)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int page_idx;

        devm_kfree(&pdev->dev, wqs->free_blocks);

        for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
                wqs_free_page(wqs, page_idx);

        free_page_arrays(wqs);
}

/**
 * alloc_wqes_shadow - allocate WQE shadows for WQ
 * @wq: WQ to allocate shadows for
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wqes_shadow(struct hinic_wq *wq)
{
        struct hinic_hwif *hwif = wq->hwif;
        struct pci_dev *pdev = hwif->pdev;
        size_t size;

        size = wq->num_q_pages * wq->max_wqe_size;
        wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wq->shadow_wqe)
                return -ENOMEM;

        size = wq->num_q_pages * sizeof(wq->prod_idx);
        wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wq->shadow_idx)
                goto err_shadow_idx;

        return 0;

err_shadow_idx:
        devm_kfree(&pdev->dev, wq->shadow_wqe);
        return -ENOMEM;
}

/**
 * free_wqes_shadow - free WQE shadows of WQ
 * @wq: WQ to free shadows from
 **/
static void free_wqes_shadow(struct hinic_wq *wq)
{
        struct hinic_hwif *hwif = wq->hwif;
        struct pci_dev *pdev = hwif->pdev;

        devm_kfree(&pdev->dev, wq->shadow_idx);
        devm_kfree(&pdev->dev, wq->shadow_wqe);
}

/**
 * free_wq_pages - free pages of WQ
 * @hwif: HW interface for releasing dma addresses
 * @wq: WQ to free pages from
 * @num_q_pages: number of pages to free
 **/
static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
                          int num_q_pages)
{
        struct pci_dev *pdev = hwif->pdev;
        int i;

        for (i = 0; i < num_q_pages; i++) {
                void **vaddr = &wq->shadow_block_vaddr[i];
                u64 *paddr = &wq->block_vaddr[i];
                dma_addr_t dma_addr;

                dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
                dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
                                  dma_addr);
        }

        free_wqes_shadow(wq);
}

/**
 * alloc_wq_pages - alloc pages for WQ
 * @hwif: HW interface for allocating dma addresses
 * @wq: WQ to allocate pages for
 * @max_pages: maximum pages allowed
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
                          int max_pages)
{
        struct pci_dev *pdev = hwif->pdev;
        int i, err, num_q_pages;

        num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
        if (num_q_pages > max_pages) {
                dev_err(&pdev->dev, "Number wq pages exceeds the limit\n");
                return -EINVAL;
        }

        if (num_q_pages & (num_q_pages - 1)) {
                dev_err(&pdev->dev, "Number wq pages must be power of 2\n");
                return -EINVAL;
        }

        wq->num_q_pages = num_q_pages;

        err = alloc_wqes_shadow(wq);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
                return err;
        }

        for (i = 0; i < num_q_pages; i++) {
                void **vaddr = &wq->shadow_block_vaddr[i];
                u64 *paddr = &wq->block_vaddr[i];
                dma_addr_t dma_addr;

                *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
                                            &dma_addr, GFP_KERNEL);
                if (!*vaddr) {
                        dev_err(&pdev->dev, "Failed to allocate wq page\n");
                        goto err_alloc_wq_pages;
                }

                /* HW uses Big Endian Format */
                *paddr = cpu_to_be64(dma_addr);
        }

        return 0;

err_alloc_wq_pages:
        free_wq_pages(wq, hwif, i);
        return -ENOMEM;
}

/**
 * hinic_wq_allocate - Allocate the WQ resources from the WQS
 * @wqs: WQ set from which to allocate the WQ resources
 * @wq: WQ to allocate resources for it from the WQ set
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
                      u16 wqebb_size, u32 wq_page_size, u16 q_depth,
                      u16 max_wqe_size)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        u16 num_wqebbs_per_page;
        u16 wqebb_size_shift;
        int err;

        if (!is_power_of_2(wqebb_size)) {
                dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
                return -EINVAL;
        }

        if (wq_page_size == 0) {
                dev_err(&pdev->dev, "wq_page_size must be > 0\n");
                return -EINVAL;
        }

        if (q_depth & (q_depth - 1)) {
                dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
                return -EINVAL;
        }

        wqebb_size_shift = ilog2(wqebb_size);
        num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
                              >> wqebb_size_shift;

        if (!is_power_of_2(num_wqebbs_per_page)) {
                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
                return -EINVAL;
        }

        wq->hwif = hwif;

        err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
        if (err) {
                dev_err(&pdev->dev, "Failed to get free wqs next block\n");
                return err;
        }

        wq->wqebb_size = wqebb_size;
        wq->wq_page_size = wq_page_size;
        wq->q_depth = q_depth;
        wq->max_wqe_size = max_wqe_size;
        wq->num_wqebbs_per_page = num_wqebbs_per_page;
        wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
        wq->wqebb_size_shift = wqebb_size_shift;
        wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
        wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
        wq->block_paddr = WQ_BASE_PADDR(wqs, wq);

        err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate wq pages\n");
                goto err_alloc_wq_pages;
        }

        atomic_set(&wq->cons_idx, 0);
        atomic_set(&wq->prod_idx, 0);
        atomic_set(&wq->delta, q_depth);
        wq->mask = q_depth - 1;

        return 0;

err_alloc_wq_pages:
        wqs_return_block(wqs, wq->page_idx, wq->block_idx);
        return err;
}
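
/* Illustrative call sequence (editorial sketch; the numeric sizes below are
 * placeholder values, not taken from this driver):
 *
 *	err = hinic_wqs_alloc(&wqs, 64, hwif);
 *	if (err)
 *		return err;
 *	err = hinic_wq_allocate(&wqs, &wq, 64, 4096, 4096, 256);
 *	...
 *	hinic_wq_free(&wqs, &wq);
 *	hinic_wqs_free(&wqs);
 *
 * hinic_wq_allocate() grabs one free WQ block from the set for the queue's
 * page table and fills it via alloc_wq_pages(); hinic_wq_free() releases the
 * pages and returns the block to the set.
 */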

/**
 * hinic_wq_free - Free the WQ resources to the WQS
 * @wqs: WQ set to free the WQ resources to it
 * @wq: WQ to free its resources to the WQ set resources
 **/
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
{
        free_wq_pages(wq, wqs->hwif, wq->num_q_pages);

        wqs_return_block(wqs, wq->page_idx, wq->block_idx);
}

/**
 * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
 * @cmdq_pages: will hold the pages of the cmdq
 * @wq: returned wqs
 * @hwif: HW interface
 * @cmdq_blocks: number of cmdq blocks/wq to allocate
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
                         struct hinic_wq *wq, struct hinic_hwif *hwif,
                         int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
                         u16 q_depth, u16 max_wqe_size)
{
        struct pci_dev *pdev = hwif->pdev;
        u16 num_wqebbs_per_page_shift;
        u16 num_wqebbs_per_page;
        u16 wqebb_size_shift;
        int i, j, err = -ENOMEM;

        if (!is_power_of_2(wqebb_size)) {
                dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
                return -EINVAL;
        }

        if (wq_page_size == 0) {
                dev_err(&pdev->dev, "wq_page_size must be > 0\n");
                return -EINVAL;
        }

        if (q_depth & (q_depth - 1)) {
                dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
                return -EINVAL;
        }

        wqebb_size_shift = ilog2(wqebb_size);
        num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
                              >> wqebb_size_shift;

        if (!is_power_of_2(num_wqebbs_per_page)) {
                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
                return -EINVAL;
        }

        cmdq_pages->hwif = hwif;

        err = cmdq_allocate_page(cmdq_pages);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
                return err;
        }

        num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);

        for (i = 0; i < cmdq_blocks; i++) {
                wq[i].hwif = hwif;
                wq[i].page_idx = 0;
                wq[i].block_idx = i;

                wq[i].wqebb_size = wqebb_size;
                wq[i].wq_page_size = wq_page_size;
                wq[i].q_depth = q_depth;
                wq[i].max_wqe_size = max_wqe_size;
                wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
                wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
                wq[i].wqebb_size_shift = wqebb_size_shift;
                wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
                wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
                wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);

                err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
                                     CMDQ_WQ_MAX_PAGES);
                if (err) {
                        dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
                        goto err_cmdq_block;
                }

                atomic_set(&wq[i].cons_idx, 0);
                atomic_set(&wq[i].prod_idx, 0);
                atomic_set(&wq[i].delta, q_depth);
                wq[i].mask = q_depth - 1;
        }

        return 0;

err_cmdq_block:
        for (j = 0; j < i; j++)
                free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);

        cmdq_free_page(cmdq_pages);
        return err;
}

/**
 * hinic_wqs_cmdq_free - Free wqs from cmdqs
 * @cmdq_pages: hold the pages of the cmdq
 * @wq: wqs to free
 * @cmdq_blocks: number of wqs to free
 **/
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
                         struct hinic_wq *wq, int cmdq_blocks)
{
        int i;

        for (i = 0; i < cmdq_blocks; i++)
                free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);

        cmdq_free_page(cmdq_pages);
}

static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
                               int num_wqebbs, u16 idx)
{
        void *wqebb_addr;
        int i;

        for (i = 0; i < num_wqebbs; i++, idx++) {
                idx = MASKED_WQE_IDX(wq, idx);
                wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
                             WQE_PAGE_OFF(wq, idx);

                memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);

                shadow_addr += wq->wqebb_size;
        }
}

static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
                                 int num_wqebbs, u16 idx)
{
        void *wqebb_addr;
        int i;

        for (i = 0; i < num_wqebbs; i++, idx++) {
                idx = MASKED_WQE_IDX(wq, idx);
                wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
                             WQE_PAGE_OFF(wq, idx);

                memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
                shadow_addr += wq->wqebb_size;
        }
}
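
/* Editorial note: a WQE that straddles a WQ page boundary cannot be handed to
 * the caller as one contiguous buffer, so it is assembled in the per-page
 * shadow area instead. copy_wqe_to_shadow() gathers the wqebbs into the
 * shadow buffer for hinic_get_wqe()/hinic_read_wqe(), and hinic_write_wqe()
 * later uses copy_wqe_from_shadow() to scatter the completed WQE back into
 * the real queue pages.
 */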

/**
 * hinic_get_wqe - get wqe ptr in the current pi and update the pi
 * @wq: wq to get wqe from
 * @wqe_size: wqe size
 * @prod_idx: returned pi
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
                                   u16 *prod_idx)
{
        int curr_pg, end_pg, num_wqebbs;
        u16 curr_prod_idx, end_prod_idx;

        *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));

        num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;

        if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
                atomic_add(num_wqebbs, &wq->delta);
                return ERR_PTR(-EBUSY);
        }

        end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);

        end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
        curr_prod_idx = end_prod_idx - num_wqebbs;
        curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

        /* end prod index points to the next wqebb, therefore minus 1 */
        end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);

        curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
        end_pg = WQE_PAGE_NUM(wq, end_prod_idx);

        *prod_idx = curr_prod_idx;

        /* If we only have one page, we still need to use the shadow wqe when
         * the wqe rolls over the page boundary
         */
        if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
                void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

                copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);

                wq->shadow_idx[curr_pg] = *prod_idx;
                return shadow_addr;
        }

        return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
}
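
/* Illustrative producer-side usage (editorial sketch; wqe_size is whatever
 * the caller computed for its WQE):
 *
 *	struct hinic_hw_wqe *hw_wqe;
 *	u16 prod_idx;
 *
 *	hw_wqe = hinic_get_wqe(wq, wqe_size, &prod_idx);
 *	if (IS_ERR(hw_wqe))
 *		return -EBUSY;
 *
 * An ERR_PTR return means the queue had fewer free wqebbs than requested.
 * After filling the WQE (possibly in the shadow buffer), the caller commits
 * it with hinic_write_wqe(wq, hw_wqe, wqe_size); hinic_return_wqe() undoes
 * the reservation if the send is aborted before writing.
 */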

/**
 * hinic_return_wqe - return the wqe when transmit failed
 * @wq: wq to return wqe
 * @wqe_size: wqe size
 **/
void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;

        atomic_sub(num_wqebbs, &wq->prod_idx);

        atomic_add(num_wqebbs, &wq->delta);
}

/**
 * hinic_put_wqe - return the wqe place to use for a new wqe
 * @wq: wq to return wqe
 * @wqe_size: wqe size
 **/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
                         >> wq->wqebb_size_shift;

        atomic_add(num_wqebbs, &wq->cons_idx);

        atomic_add(num_wqebbs, &wq->delta);
}
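
/* Editorial note on the accounting: wq->delta counts free wqebbs and starts
 * at q_depth. hinic_get_wqe() subtracts the reserved wqebbs, and
 * hinic_put_wqe() (on completion) or hinic_return_wqe() (on an aborted send)
 * adds them back. For example (illustrative), on a queue with q_depth = 256,
 * reserving a 2-wqebb WQE drops delta to 254 and the matching hinic_put_wqe()
 * restores it to 256.
 */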

/**
 * hinic_read_wqe - read wqe ptr in the current ci
 * @wq: wq to get read from
 * @wqe_size: wqe size
 * @cons_idx: returned ci
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
                                    u16 *cons_idx)
{
        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
                         >> wq->wqebb_size_shift;
        u16 curr_cons_idx, end_cons_idx;
        int curr_pg, end_pg;

        if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
                return ERR_PTR(-EBUSY);

        curr_cons_idx = atomic_read(&wq->cons_idx);

        curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
        end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);

        curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
        end_pg = WQE_PAGE_NUM(wq, end_cons_idx);

        *cons_idx = curr_cons_idx;

        if (curr_pg != end_pg) {
                void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

                copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
                return shadow_addr;
        }

        return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
}

/**
 * hinic_read_wqe_direct - read wqe directly from ci position
 * @wq: wq to read from
 * @cons_idx: ci position
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
{
        return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
}

/**
 * wqe_shadow - check if a wqe is shadow
 * @wq: wq of the wqe
 * @wqe: the wqe for shadow checking
 *
 * Return true - shadow, false - Not shadow
 **/
static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
{
        size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;

        return WQE_IN_RANGE(wqe, wq->shadow_wqe,
                            &wq->shadow_wqe[wqe_shadow_size]);
}

/**
 * hinic_write_wqe - write the wqe to the wq
 * @wq: wq to write wqe to
 * @wqe: wqe to write
 * @wqe_size: wqe size
 **/
void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
                     unsigned int wqe_size)
{
        int curr_pg, num_wqebbs;
        void *shadow_addr;
        u16 prod_idx;

        if (wqe_shadow(wq, wqe)) {
                curr_pg = WQE_SHADOW_PAGE(wq, wqe);

                prod_idx = wq->shadow_idx[curr_pg];
                num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
                shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

                copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
        }
}