// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit keysizes in
 * CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"
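
/*
 * Pick the flow (DMA channel/engine) for the next request in a simple
 * round-robin fashion, so the load is spread across the MAXFLOW flows.
 */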
static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}
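
/*
 * Decide whether a request must go through the software fallback: the
 * hardware cannot handle empty requests, src/dst scatterlists of different
 * shape, more SG entries than descriptors are available (3 descriptors are
 * reserved for key and IV), lengths that are not a multiple of the AES block
 * size, or buffers that are not 32-bit aligned.
 */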
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

	/* KEY/IV descriptors use 3 desc */
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}
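
/*
 * Run the request through the sync skcipher fallback instead of the
 * hardware, preserving the direction (encrypt/decrypt) recorded in the
 * request context.
 */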
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
#endif
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_sync_tfm(req, op->fallback_tfm);
	skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(req);
	else
		err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);

	return err;
}
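
/*
 * Program and run one request on the hardware: build the descriptor list
 * (key, then IV, then one descriptor per SG entry), kick the selected flow
 * and wait for its completion.
 */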
static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs = 0, nr_sgd = 0;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	u32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

	/*
	 * The hardware expects a list of meson_desc structures.
	 * The first two structures store the key,
	 * the third stores the IV.
	 */
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
	/* The key is loaded in 16 byte chunks: pad a 24 byte (AES-192) key to 32 bytes */
	if (keyivlen == 24)
		keyivlen = 32;
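
	/* Map the key + IV material so the engine can fetch it through DMA. */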
	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}
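
	/*
	 * Fill the first descriptors with the key and IV material, 16 bytes
	 * per descriptor, using the MODE_KEY operation.
	 */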
	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}
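
	/*
	 * Map the source and destination scatterlists. When both point to the
	 * same list, a single bidirectional mapping is enough.
	 */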
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs < 0) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			goto theend;
		}
	}
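
	/*
	 * Build one cipher descriptor per SG entry: source/destination DMA
	 * addresses, the length to process, key mode, block mode and
	 * direction. The final descriptor is flagged as the last one.
	 */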
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir == MESON_ENCRYPT)
			v |= DESC_ENCRYPTION;
		len -= todo;

		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
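
	/*
	 * Hand the descriptor chain to the hardware: reset the flow status,
	 * write the descriptor list address to the flow register and wait
	 * (up to 500 ms) for the completion raised by the interrupt handler.
	 */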
	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}
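
	/* Tear down the DMA mappings now that the engine is done with them. */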
	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}
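
	/*
	 * Return the next IV to the caller as the skcipher API expects: the
	 * saved last ciphertext block for decryption, or the last block of
	 * the produced ciphertext for encryption.
	 */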
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend:
	kzfree(bkeyiv);
	kzfree(backup_iv);

	return err;
}
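
/*
 * crypto_engine entry point: run the request synchronously on the hardware
 * and report its completion back to the engine.
 */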
static int meson_handle_cipher_request(struct crypto_engine *engine,
				       void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}
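
/*
 * skcipher .decrypt/.encrypt entry points: requests the hardware cannot
 * handle go straight to the fallback, the others are queued on a
 * round-robin selected flow of the crypto engine.
 */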
int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
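
/*
 * Initialize the per-tfm context: resolve the owning device, set the request
 * context size, allocate the software fallback and register the engine
 * callbacks.
 */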
int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	op->mc = algt->mc;

	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	op->enginectx.op.do_one_request = meson_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	return 0;
}
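
/* Free the key material and the fallback transform when the tfm goes away. */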
void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	crypto_free_sync_skcipher(op->fallback_tfm);
}
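
/*
 * Record the AES key: select the hardware key mode from the key length,
 * keep a DMA-able copy of the key and program the fallback with it too.
 */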
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}