/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i;

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == EIP197_CS_RC_MAX - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
	for (i = 0; i < 64; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}
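
/*
 * Load one firmware image into the classification engine program RAM:
 * the engine is held in reset while the big-endian firmware words are
 * written through the classification RAM window, then released.
 */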
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	int i, j, ret = 0;
	u32 val;

	for (i = 0; i < FW_NB; i++) {
		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
		if (ret) {
			dev_err(priv->dev,
				"Failed to request firmware %s (%d)\n",
				fw_name[i], ret);
			goto release_fw;
		}
	}

	/* Clear the scratchpad memory */
	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);

	memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
		  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}
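
/*
 * Program the command descriptor rings (CDR): ring base addresses,
 * descriptor size/offset and the fetch configuration, all scaled by the
 * host interface data width advertised in EIP197_HIA_OPTIONS.
 */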
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
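
/*
 * Program the result descriptor rings (RDR), mirroring the CDR setup,
 * and additionally enable the per-ring result interrupt.
 */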
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Data Fetch Engine configuration */

	/* Reset all DFE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	if (priv->version == EIP197) {
		/* Reset HIA input interface arbiter */
		writel(EIP197_HIA_RA_PE_CTRL_RESET,
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* DMA transfer size to use */
	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);

	/* Leave the DFE threads reset state */
	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);

	if (priv->version == EIP197) {
		/* enable HIA input interface arbiter and rings */
		writel(EIP197_HIA_RA_PE_CTRL_EN |
		       GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* Data Store Engine configuration */

	/* Reset all DSE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Wait for all DSE threads to complete */
	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
		GENMASK(15, 12)) != GENMASK(15, 12))
		;

	/* DMA transfer size to use */
	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
	val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
	/* FIXME: instability issues can occur for EIP97 but disabling it impacts
	 * performance.
	 */
	if (priv->version == EIP197)
		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);

	/* Leave the DSE threads reset state */
	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);

	/* Processing Engine configuration */

	/* H/W capabilities selection */
	val = EIP197_FUNCTION_RSVD;
	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
	val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
	val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Enable command descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Enable result descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
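
/*
 * Pop requests off the ring's crypto queue, let each context's send()
 * callback build command/result descriptors, and finally advertise the
 * number of prepared descriptors to the CDR and RDR.
 */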
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	struct safexcel_request *request;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
		if (!request)
			goto request_failed;

		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, request, &commands, &results);
		if (ret) {
			kfree(request);
			goto request_failed;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results) {
			kfree(request);
			continue;
		}

		spin_lock_bh(&priv->ring[ring].egress_lock);
		list_add_tail(&request->list, &priv->ring[ring].list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
			      struct safexcel_request *request)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc)) {
		ret = PTR_ERR(cdesc);
		goto unlock;
	}

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	request->req = async;
	goto unlock;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
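
/*
 * Called from the threaded ring IRQ handler: walk the processed packet
 * count reported by the RDR, complete the corresponding requests and
 * acknowledge the handled descriptors back to the engine.
 */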
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct safexcel_request *sreq;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		spin_lock_bh(&priv->ring[ring].egress_lock);
		sreq = list_first_entry(&priv->ring[ring].list,
					struct safexcel_request, list);
		list_del(&sreq->list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		ctx = crypto_tfm_ctx(sreq->req->tfm);
		ndesc = ctx->handle_result(priv, ring, sreq->req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			kfree(sreq);
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			sreq->req->complete(sreq->req, ret);
			local_bh_enable();
		}

		kfree(sreq);
		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to proceed more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
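
/*
 * Derive the ring count and the command/result descriptor sizes. The
 * per-descriptor offset (stride) is the descriptor size rounded up to
 * the host interface data width read from EIP197_HIA_OPTIONS.
 */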
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		INIT_LIST_HEAD(&priv->ring[i].list);
		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].egress_lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");