/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

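/*
 * Initialize the EIP197 transform record cache (TRC): clear all cache
 * records and hash table entries, rebuild the record free chain and
 * program the cache size parameters. Only called for EIP197 engines
 * (see safexcel_hw_init()).
 */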
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i;

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == EIP197_CS_RC_MAX - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
	for (i = 0; i < 64; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

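/*
 * Write one firmware image into the engine's classification RAM: hold the
 * engine in reset, enable program memory access, copy the big-endian words,
 * then release the reset.
 */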
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

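/*
 * Request the ifpp.bin and ipue.bin firmware images, clear the ICE
 * scratchpad RAM, then write both images to their respective engines.
 */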
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	int i, j, ret = 0;
	u32 val;

	for (i = 0; i < FW_NB; i++) {
		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
		if (ret) {
			dev_err(priv->dev,
				"Failed to request firmware %s (%d)\n",
				fw_name[i], ret);
			goto release_fw;
		}
	}

	/* Clear the scratchpad memory */
	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);

	memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
	       EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

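/*
 * Program each command descriptor ring (CDR): base address, descriptor
 * size/offset and fetch configuration. The fetch size is rounded up
 * according to the hdw field read from HIA_OPTIONS bits 27:25.
 */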
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Data Fetch Engine configuration */

	/* Reset all DFE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	if (priv->version == EIP197) {
		/* Reset HIA input interface arbiter */
		writel(EIP197_HIA_RA_PE_CTRL_RESET,
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* DMA transfer size to use */
	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);

	/* Leave the DFE threads reset state */
	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);

	if (priv->version == EIP197) {
		/* enable HIA input interface arbiter and rings */
		writel(EIP197_HIA_RA_PE_CTRL_EN |
		       GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* Data Store Engine configuration */

	/* Reset all DSE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Wait for all DSE threads to complete */
	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
		GENMASK(15, 12)) != GENMASK(15, 12))
		;

	/* DMA transfer size to use */
	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
	val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
	/* FIXME: instability issues can occur for EIP97 but disabling it impacts
	 * performance.
	 */
	if (priv->version == EIP197)
		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);

	/* Leave the DSE threads reset state */
	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);

	/* Processing Engine configuration */

	/* H/W capabilities selection */
	val = EIP197_FUNCTION_RSVD;
	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
	val |= EIP197_ALG_SHA2;
	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Enable command descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Enable result descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				      int ring, int reqs)
{
	int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return 0;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);

	return coal;
}

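/*
 * Pull requests from the ring's crypto queue, build their command/result
 * descriptors through the context's send() callback, then advertise the
 * prepared descriptors to the CDR and RDR.
 */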
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	struct safexcel_request *request;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
		if (!request)
			goto request_failed;

		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, request, &commands, &results);
		if (ret) {
			kfree(request);
			goto request_failed;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		spin_lock_bh(&priv->ring[ring].egress_lock);
		list_add_tail(&request->list, &priv->ring[ring].list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	if (!priv->ring[ring].busy) {
		nreq -= safexcel_try_push_requests(priv, ring, nreq);
		if (nreq)
			priv->ring[ring].busy = true;
	}

	priv->ring[ring].requests_left += nreq;

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

void safexcel_free_context(struct safexcel_crypto_priv *priv,
			   struct crypto_async_request *req,
			   int result_sz)
{
	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);

	if (ctx->result_dma)
		dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
				 DMA_FROM_DEVICE);

	if (ctx->cache) {
		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
				 DMA_TO_DEVICE);
		kfree(ctx->cache);
		ctx->cache = NULL;
		ctx->cache_sz = 0;
	}
}

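/*
 * Pop the command descriptors of the current result from the CDR, up to
 * and including the last segment of the request.
 */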
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

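/*
 * Queue a command/result descriptor pair asking the engine to invalidate
 * the context record at ctxr_dma (CONTEXT_CONTROL_INV_TR).
 */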
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
			      struct safexcel_request *request)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc)) {
		ret = PTR_ERR(cdesc);
		goto unlock;
	}

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	request->req = async;
	goto unlock;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}

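/*
 * Bottom half of the ring interrupt: pop processed requests from the RDR,
 * complete them, acknowledge the processed packet/descriptor counters and
 * re-arm the result interrupt threshold for the requests still pending.
 */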
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct safexcel_request *sreq;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, done;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		spin_lock_bh(&priv->ring[ring].egress_lock);
		sreq = list_first_entry(&priv->ring[ring].list,
					struct safexcel_request, list);
		list_del(&sreq->list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		ctx = crypto_tfm_ctx(sreq->req->tfm);
		ndesc = ctx->handle_result(priv, ring, sreq->req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			kfree(sreq);
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			sreq->req->complete(sreq->req, ret);
			local_bh_enable();
		}

		kfree(sreq);
		tot_descs += ndesc;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].egress_lock);

	done = safexcel_try_push_requests(priv, ring,
					  priv->ring[ring].requests_left);

	priv->ring[ring].requests_left -= done;
	if (!done && !priv->ring[ring].requests_left)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

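/* Algorithm templates exposed by this driver, registered at probe time. */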
static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_hmac_sha1,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

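/*
 * Read the number of rings from HIA_OPTIONS and derive the command/result
 * descriptor sizes and offsets (in 32-bit words), rounding the offsets up
 * to the data width advertised in HIA_OPTIONS bits 27:25.
 */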
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

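/*
 * The EIP97 and EIP197 expose the same register blocks at different base
 * offsets; pick the right set once at probe time.
 */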
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP197) {
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
	} else {
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = of_clk_get(dev->of_node, 0);
	if (!IS_ERR(priv->clk)) {
		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	} else {
		/* The clock isn't mandatory */
		if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_clk;
	}

	safexcel_configure(priv);

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_clk;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_clk;
		}

		priv->ring[i].requests_left = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		INIT_LIST_HEAD(&priv->ring[i].list);
		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].egress_lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_clk;
	}

	return 0;

err_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");