// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */
#include "sun4i-ss.h"

static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_opti++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);
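
	/*
	 * Program the key and, when present, the IV into the engine word by
	 * word, then start the operation by writing the mode word to SS_CTL.
	 */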
	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
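
		/* drain whatever the engine has produced into the destination SG */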
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, ctx->backup_iv, ivsize);
			memzero_explicit(ctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
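
/*
 * Requests the engine cannot process (e.g. a cryptlen that is not a multiple
 * of the block size) are handed as-is to the software fallback skcipher.
 */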
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	int err;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&ctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&ctx->fallback_req);

	return err;
}

/* Generic function that supports SGs whose size is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * If we have only SGs with a size multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt->stat_req++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;
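
	/*
	 * Unlike sun4i_ss_opti_poll(), this loop must cope with SG entries
	 * whose length or offset is not word-aligned: partial words are
	 * staged in ss->buf on the way in and in ss->bufo on the way out.
	 */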
	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * not enough consecutive bytes, so we need to
				 * linearize in buf. todo is in bytes.
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all of buf in
				 * one pass, which is why we min() with rx_cnt
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(ss->buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, ss->buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		if (!tx_cnt)
			continue;
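
		/*
		 * Drain the TX FIFO: read directly into the destination SG
		 * when a whole number of words fits, otherwise bounce the
		 * data through ss->bufo.
		 */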
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * read obl bytes into bufo; we read as much as
			 * possible to empty the device
			 */
			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * how many bytes can we copy?
				 * no more than the remaining SG size
				 * no more than the remaining buffer
				 * no need to test against oleft
				 */
				todo = min_t(unsigned int,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, ss->bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
		sg_miter_stop(&mo);
	}

	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, ctx->backup_iv, ivsize);
			memzero_explicit(ctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
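
/*
 * The entry points below only assemble the SS_CTL mode word (algorithm,
 * chaining mode, direction, key size) in the request context and hand the
 * request to sun4i_ss_cipher_poll().
 */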

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
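
/*
 * For illustration only (not part of the driver): a kernel user reaches the
 * handlers above through the generic skcipher API, e.g.:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);
 *
 * The crypto core dispatches to sun4i_ss_cbc_aes_encrypt() when this driver
 * provides the highest-priority "cbc(aes)" implementation.
 */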

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}