drivers/crypto/cavium/nitrox/nitrox_lib.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE	256

/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32
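
/*
 * nitrox_cmdq_init - initialize a command queue
 * @cmdq: command queue to initialize
 * @align_bytes: required alignment of the queue base address
 *
 * Allocates a DMA-coherent ring sized for ndev->qlen instructions plus
 * alignment slack, aligns the base address and DMA handle, and resets
 * the queue locks, lists and counters.
 */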
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;

	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
						&cmdq->unalign_dma,
						GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}
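
/* Reset the software state of a command queue: write index and counters. */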
static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
{
	cmdq->write_idx = 0;
	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
}
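
/*
 * nitrox_cmdq_cleanup - tear down a command queue
 * @cmdq: command queue to clean up
 *
 * Cancels pending backlog flush work, frees the DMA-coherent ring and
 * clears the queue bookkeeping fields. Safe to call on a queue that was
 * never initialized.
 */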
static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev;

	if (!cmdq)
		return;

	if (!cmdq->unalign_base)
		return;

	ndev = cmdq->ndev;
	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->unalign_base, cmdq->unalign_dma);
	nitrox_cmdq_reset(cmdq);

	cmdq->dbell_csr_addr = NULL;
	cmdq->compl_cnt_csr_addr = NULL;
	cmdq->unalign_base = NULL;
	cmdq->base = NULL;
	cmdq->unalign_dma = 0;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}
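
/* Free all AQM command queues of the device. */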
static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		nitrox_cmdq_cleanup(ndev->aqmq[i]);
		kzfree(ndev->aqmq[i]);
		ndev->aqmq[i] = NULL;
	}
}
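
/*
 * nitrox_alloc_aqm_queues - allocate one AQM command queue per ring
 * @ndev: NITROX device
 *
 * Each queue is wired to its doorbell and commands-completed CSR
 * addresses and given a DMA ring via nitrox_cmdq_init(). On failure,
 * all queues allocated so far are freed.
 */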
static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
{
	int i, err;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
		if (!cmdq) {
			err = -ENOMEM;
			goto aqmq_fail;
		}

		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct aqmq_command_s);

		/* AQM Queue Doorbell Counter Register Address */
		offset = AQMQ_DRBLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* AQM Queue Commands Completed Count Register Address */
		offset = AQMQ_CMD_CNTX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
		if (err) {
			kzfree(cmdq);
			goto aqmq_fail;
		}
		ndev->aqmq[i] = cmdq;
	}

	return 0;

aqmq_fail:
	nitrox_free_aqm_queues(ndev);
	return err;
}
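
/* Free the packet input command queues and the backing array. */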
static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];

		nitrox_cmdq_cleanup(cmdq);
	}
	kfree(ndev->pkt_inq);
	ndev->pkt_inq = NULL;
}
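
/*
 * nitrox_alloc_pktin_queues - allocate the packet input command queues
 * @ndev: NITROX device
 *
 * Allocates one nitrox_cmdq per ring, wires up the doorbell and
 * solicit-port completion count CSR addresses and initializes each DMA
 * ring. On failure, all packet input queues are freed.
 */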
static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
				     sizeof(struct nitrox_cmdq),
				     GFP_KERNEL, ndev->node);
	if (!ndev->pkt_inq)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_inq[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* packet input ring doorbell address */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* packet solicit port completion count address */
		offset = NPS_PKT_SLC_CNTSX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
		if (err)
			goto pktq_fail;
	}
	return 0;

pktq_fail:
	nitrox_free_pktin_queues(ndev);
	return err;
}
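
/* Create the per-device DMA pool used for crypto context allocations. */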
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* Crypto context pool, 16 byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("nitrox-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}
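
/* Destroy the per-device crypto context DMA pool, if it was created. */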
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX Device
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	struct crypto_ctx_hdr *chdr;
	void *vaddr;
	dma_addr_t dma;

	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
	if (!chdr)
		return NULL;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		kfree(chdr);
		return NULL;
	}

	/* fill meta data */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	chdr->pool = ndev->ctx_pool;
	chdr->dma = dma;
	chdr->vaddr = vaddr;

	return chdr;
}

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct crypto_ctx_hdr *ctxp;

	if (!ctx)
		return;

	ctxp = ctx;
	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
	kfree(ctxp);
}

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the crypto context pool and the packet input and AQM
 * command queues.
 *
 * Return: 0 on success, or a negative error code on error.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_alloc_pktin_queues(ndev);
	if (err) {
		destroy_crypto_dma_pool(ndev);
		return err;
	}

	err = nitrox_alloc_aqm_queues(ndev);
	if (err) {
		nitrox_free_pktin_queues(ndev);
		destroy_crypto_dma_pool(ndev);
	}

	return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_free_aqm_queues(ndev);
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}