// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for AES cipher with 128,192,256 bits keysize in
 * ECB mode.
 */
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sl3516-ce.h"

/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16) {
		ce->fallback_mod16++;
		return true;
	}

	/*
	 * check if we have enough descriptors for TX
	 * Note: TX need one control desc for each SG
	 */
	if (sg_nents(areq->src) > MAXDESC / 2) {
		ce->fallback_sg_count_tx++;
		return true;
	}
	/* check if we have enough descriptors for RX */
	if (sg_nents(areq->dst) > MAXDESC) {
		ce->fallback_sg_count_rx++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}

	/* need same numbers of SG (with same length) for source and destination */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length) {
			ce->fallback_not_same_len++;
			return true;
		}
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;

	return false;
}

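/* sl3516_ce_cipher_fallback - process the request with the software fallback tfm */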
static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
	algt->stat_fb++;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir == CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

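/* sl3516_ce_cipher - map src/dst for DMA, build the control packet and run the CE task */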
static int sl3516_ce_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len;
	struct pkt_control_ecb *ecb;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	algt->stat_req++;

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	switch (algt->mode) {
	case ECB_AES:
		rctx->pctrllen = sizeof(struct pkt_control_ecb);
		ecb = (struct pkt_control_ecb *)ce->pctrl;

		rctx->tqflag = TQ0_TYPE_CTRL;
		rctx->tqflag |= TQ1_CIPHER;
		ecb->control.op_mode = rctx->op_dir;
		ecb->control.cipher_algorithm = ECB_AES;
		ecb->cipher.header_len = 0;
		ecb->cipher.algorithm_len = areq->cryptlen;
		cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
		rctx->h = &ecb->cipher;

		rctx->tqflag |= TQ4_KEY0;
		rctx->tqflag |= TQ5_KEY4;
		rctx->tqflag |= TQ6_KEY6;
		ecb->control.aesnk = op->keylen / 4;
		break;
	}

	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend:
	return err;
}

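/* sl3516_ce_handle_cipher_request - crypto_engine callback running one queued request */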
int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sl3516_ce_cipher(breq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

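/* sl3516_ce_skdecrypt - queue a decryption request, or use the fallback if the CE cannot handle it */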
int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_DECRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

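/* sl3516_ce_skencrypt - queue an encryption request, or use the fallback if the CE cannot handle it */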
int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_ENCRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

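/* sl3516_ce_cipher_init - allocate the fallback tfm and set the request context size */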
int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sl3516_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(sktfm, sizeof(struct sl3516_ce_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

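/* sl3516_ce_cipher_exit - free the key and the fallback tfm */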
void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

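/* sl3516_ce_aes_setkey - check the AES key length and keep a DMA-able copy of the key */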
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}