// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"

static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}
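
/*
 * meson_cipher_need_fallback() - detect requests the engine cannot
 * handle. The hardware wants whole 16-byte blocks, u32-aligned
 * scatterlist entries, source and destination lists of identical
 * shape, and at most MAXDESC - 3 data descriptors (three descriptors
 * are reserved for the key and IV).
 */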
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

	/* KEY/IV descriptors use 3 desc */
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}
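
/*
 * meson_cipher_do_fallback() - hand the request to the software
 * skcipher allocated in meson_cipher_init(), preserving the original
 * callback, flags and buffers.
 */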
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);

	return err;
}
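
/*
 * meson_cipher() - process one request on the hardware. Each flow owns
 * a table of meson_desc descriptors: the first two load the key, the
 * third loads the IV, and the rest describe the data scatterlists.
 */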
static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs, nr_sgd;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	u32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

	/*
	 * The hardware expects a list of meson_desc structures.
	 * The first 2 structures store the key, the third stores the IV.
	 */
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;
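
	/*
	 * The IV lives at offset 32 of the key/IV buffer. For CBC
	 * decryption the last ciphertext block is saved first, since it
	 * becomes the user-visible next IV and an in-place operation
	 * would otherwise overwrite it.
	 */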
	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n",
				ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
	/* round an AES-192 key up to 32 bytes for the DMA mapping */
	if (keyivlen == 24)
		keyivlen = 32;

	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}
	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}
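
	/*
	 * Map the data scatterlists: an in-place request shares a single
	 * bidirectional mapping, otherwise source and destination are
	 * mapped separately.
	 */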
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs < 0) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			goto theend;
		}
	}
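
	/*
	 * Build one cipher descriptor per scatterlist entry: the status
	 * word packs the byte count, key size (keymode), block mode and
	 * ownership bit, and the final descriptor is flagged DESC_LAST.
	 */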
	src_sg = areq->src;
	dst_sg = areq->dst;
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir == MESON_ENCRYPT)
			v |= DESC_ENCRYPTION;
		len -= todo;

		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
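
	/*
	 * Kick the flow: write the physical address of its descriptor
	 * table, with bit 1 set (presumably the start flag), to the
	 * flow register, then wait up to 500ms for the driver's
	 * interrupt handler to signal completion.
	 */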
	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}

	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend:
	kfree_sensitive(bkeyiv);
	kfree_sensitive(backup_iv);

	return err;
}
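
/*
 * meson_handle_cipher_request() - crypto_engine entry point: run the
 * request synchronously and report the result back to the engine.
 */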
static int meson_handle_cipher_request(struct crypto_engine *engine,
				       void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
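
/*
 * meson_cipher_init() - tfm init: allocate the software fallback and
 * wire up the crypto_engine request handler.
 */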
int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	op->mc = algt->mc;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	op->enginectx.op.do_one_request = meson_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
}
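
/*
 * meson_aes_setkey() - validate the AES key size, keep a DMA-able copy
 * of the key and mirror it into the fallback transform.
 */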
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}