// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
#define CRYPTO_CTX_SIZE	256

/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32
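
/*
 * The alignment macros above are consumed by nitrox_cmdq_init() below: each
 * command queue is over-allocated by align_bytes and its DMA handle is then
 * rounded up to that boundary, so packet input rings end up 16-byte aligned
 * and AQM queues 32-byte aligned.
 */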

static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* over-allocate so the ring base can be aligned to align_bytes */
	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
						&cmdq->unalign_dma,
						GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	/* align the DMA address and advance the CPU pointer by the same amount */
	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);

	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);

	return 0;
}
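
/*
 * Illustration of the alignment fix-up in nitrox_cmdq_init() (example values
 * only): if dma_alloc_coherent() returned unalign_dma = 0x1008 and align_bytes
 * is 32, PTR_ALIGN() yields dma = 0x1020, and base is advanced by the same
 * 0x18 bytes past unalign_base, so base and dma still refer to the same
 * location inside the over-allocated buffer.
 */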

static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
{
	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
}

static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev;

	if (!cmdq)
		return;

	if (!cmdq->unalign_base)
		return;

	ndev = cmdq->ndev;
	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->unalign_base, cmdq->unalign_dma);
	nitrox_cmdq_reset(cmdq);

	cmdq->dbell_csr_addr = NULL;
	cmdq->compl_cnt_csr_addr = NULL;
	cmdq->unalign_base = NULL;
	cmdq->base = NULL;

	cmdq->unalign_dma = 0;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}
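
/*
 * nitrox_cmdq_cleanup() is safe to call on queues that were never initialized
 * or were already cleaned up: the NULL cmdq and NULL unalign_base checks above
 * turn such calls into no-ops, which the error-unwind paths below rely on.
 */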

static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		nitrox_cmdq_cleanup(ndev->aqmq[i]);
		kzfree(ndev->aqmq[i]);
		ndev->aqmq[i] = NULL;
	}
}

static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
{
	int i, err;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
		if (!cmdq) {
			err = -ENOMEM;
			goto aqmq_fail;
		}

		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct aqmq_command_s);

		/* AQM Queue Doorbell Counter Register Address */
		offset = AQMQ_DRBLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* AQM Queue Commands Completed Count Register Address */
		offset = AQMQ_CMD_CNTX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
		if (err) {
			kzfree(cmdq);
			goto aqmq_fail;
		}
		ndev->aqmq[i] = cmdq;
	}

	return 0;

aqmq_fail:
	nitrox_free_aqm_queues(ndev);
	return err;
}
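
/*
 * On any failure above, the aqmq_fail path frees every AQM queue allocated so
 * far; nitrox_cmdq_cleanup() and kzfree() both tolerate the NULL entries, so
 * callers never see a half-populated ndev->aqmq[] array.
 */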

static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];

		nitrox_cmdq_cleanup(cmdq);
	}
	kfree(ndev->pkt_inq);
	ndev->pkt_inq = NULL;
}

static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
				     sizeof(struct nitrox_cmdq),
				     GFP_KERNEL, ndev->node);
	if (!ndev->pkt_inq)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_inq[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* packet input ring doorbell address */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* packet solicit port completion count address */
		offset = NPS_PKT_SLC_CNTSX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
		if (err)
			goto pktq_fail;
	}

	return 0;

pktq_fail:
	nitrox_free_pktin_queues(ndev);
	return err;
}
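
/*
 * Note the asymmetry with the AQM side: packet input queues live in one
 * kcalloc_node()'d array (ndev->pkt_inq), while each AQM queue is a separate
 * kzalloc_node() allocation stored in ndev->aqmq[i], which is why their free
 * paths differ.
 */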

static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* Crypto context pool, 16 byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("nitrox-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}

static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX Device
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	struct crypto_ctx_hdr *chdr;
	void *vaddr;
	dma_addr_t dma;

	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
	if (!chdr)
		return NULL;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		kfree(chdr);
		return NULL;
	}

	/* fill meta data */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	chdr->pool = ndev->ctx_pool;
	chdr->dma = dma;
	chdr->vaddr = vaddr;

	return chdr;
}
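
/*
 * Usage sketch (illustrative only, not a caller in this file): the handle
 * returned above is the struct crypto_ctx_hdr, and it is what must be passed
 * back to crypto_free_context():
 *
 *	void *chdr = crypto_alloc_context(ndev);
 *
 *	if (!chdr)
 *		return -ENOMEM;
 *	... program the crypto context through the returned handle ...
 *	crypto_free_context(chdr);
 */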

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct crypto_ctx_hdr *ctxp;

	if (!ctx)
		return;

	ctxp = ctx;
	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
}

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates crypto context pools and command queues etc.
 *
 * Return: 0 on success, or a negative error code on error.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err = 0;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_alloc_pktin_queues(ndev);
	if (err) {
		destroy_crypto_dma_pool(ndev);
		return err;
	}

	err = nitrox_alloc_aqm_queues(ndev);
	if (err) {
		nitrox_free_pktin_queues(ndev);
		destroy_crypto_dma_pool(ndev);
		return err;
	}

	return 0;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_free_aqm_queues(ndev);
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}