/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
19 static int sun4i_ss_opti_poll(struct ablkcipher_request
*areq
)
21 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
22 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
23 struct sun4i_ss_ctx
*ss
= op
->ss
;
24 unsigned int ivsize
= crypto_ablkcipher_ivsize(tfm
);
25 struct sun4i_cipher_req_ctx
*ctx
= ablkcipher_request_ctx(areq
);
27 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
28 u32 rx_cnt
= SS_RX_DEFAULT
;
33 unsigned int ileft
= areq
->nbytes
;
34 unsigned int oleft
= areq
->nbytes
;
36 struct sg_mapping_iter mi
, mo
;
37 unsigned int oi
, oo
; /* offset for in and out */
40 if (areq
->nbytes
== 0)
44 dev_err_ratelimited(ss
->dev
, "ERROR: Empty IV\n");
48 if (!areq
->src
|| !areq
->dst
) {
49 dev_err_ratelimited(ss
->dev
, "ERROR: Some SGs are NULL\n");
53 spin_lock_irqsave(&ss
->slock
, flags
);
55 for (i
= 0; i
< op
->keylen
; i
+= 4)
56 writel(*(op
->key
+ i
/ 4), ss
->base
+ SS_KEY0
+ i
);
59 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
60 v
= *(u32
*)(areq
->info
+ i
* 4);
61 writel(v
, ss
->base
+ SS_IV0
+ i
* 4);
64 writel(mode
, ss
->base
+ SS_CTL
);
66 sg_miter_start(&mi
, areq
->src
, sg_nents(areq
->src
),
67 SG_MITER_FROM_SG
| SG_MITER_ATOMIC
);
68 sg_miter_start(&mo
, areq
->dst
, sg_nents(areq
->dst
),
69 SG_MITER_TO_SG
| SG_MITER_ATOMIC
);
72 if (!mi
.addr
|| !mo
.addr
) {
73 dev_err_ratelimited(ss
->dev
, "ERROR: sg_miter return null\n");
78 ileft
= areq
->nbytes
/ 4;
79 oleft
= areq
->nbytes
/ 4;
83 todo
= min3(rx_cnt
, ileft
, (mi
.length
- oi
) / 4);
86 writesl(ss
->base
+ SS_RXFIFO
, mi
.addr
+ oi
, todo
);
89 if (oi
== mi
.length
) {
94 spaces
= readl(ss
->base
+ SS_FCSR
);
95 rx_cnt
= SS_RXFIFO_SPACES(spaces
);
96 tx_cnt
= SS_TXFIFO_SPACES(spaces
);
98 todo
= min3(tx_cnt
, oleft
, (mo
.length
- oo
) / 4);
101 readsl(ss
->base
+ SS_TXFIFO
, mo
.addr
+ oo
, todo
);
104 if (oo
== mo
.length
) {
111 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
112 v
= readl(ss
->base
+ SS_IV0
+ i
* 4);
113 *(u32
*)(areq
->info
+ i
* 4) = v
;
120 writel(0, ss
->base
+ SS_CTL
);
121 spin_unlock_irqrestore(&ss
->slock
, flags
);
125 /* Generic function that support SG with size not multiple of 4 */
126 static int sun4i_ss_cipher_poll(struct ablkcipher_request
*areq
)
128 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
129 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
130 struct sun4i_ss_ctx
*ss
= op
->ss
;
132 struct scatterlist
*in_sg
= areq
->src
;
133 struct scatterlist
*out_sg
= areq
->dst
;
134 unsigned int ivsize
= crypto_ablkcipher_ivsize(tfm
);
135 struct sun4i_cipher_req_ctx
*ctx
= ablkcipher_request_ctx(areq
);
136 u32 mode
= ctx
->mode
;
137 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
138 u32 rx_cnt
= SS_RX_DEFAULT
;
143 unsigned int ileft
= areq
->nbytes
;
144 unsigned int oleft
= areq
->nbytes
;
146 struct sg_mapping_iter mi
, mo
;
147 unsigned int oi
, oo
; /* offset for in and out */
148 char buf
[4 * SS_RX_MAX
];/* buffer for linearize SG src */
149 char bufo
[4 * SS_TX_MAX
]; /* buffer for linearize SG dst */
150 unsigned int ob
= 0; /* offset in buf */
151 unsigned int obo
= 0; /* offset in bufo*/
152 unsigned int obl
= 0; /* length of data in bufo */
155 if (areq
->nbytes
== 0)
159 dev_err_ratelimited(ss
->dev
, "ERROR: Empty IV\n");
163 if (!areq
->src
|| !areq
->dst
) {
164 dev_err_ratelimited(ss
->dev
, "ERROR: Some SGs are NULL\n");
169 * if we have only SGs with size multiple of 4,
170 * we can use the SS optimized function
172 while (in_sg
&& no_chunk
== 1) {
173 if ((in_sg
->length
% 4) != 0)
175 in_sg
= sg_next(in_sg
);
177 while (out_sg
&& no_chunk
== 1) {
178 if ((out_sg
->length
% 4) != 0)
180 out_sg
= sg_next(out_sg
);
184 return sun4i_ss_opti_poll(areq
);
186 spin_lock_irqsave(&ss
->slock
, flags
);
188 for (i
= 0; i
< op
->keylen
; i
+= 4)
189 writel(*(op
->key
+ i
/ 4), ss
->base
+ SS_KEY0
+ i
);
192 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
193 v
= *(u32
*)(areq
->info
+ i
* 4);
194 writel(v
, ss
->base
+ SS_IV0
+ i
* 4);
197 writel(mode
, ss
->base
+ SS_CTL
);
199 sg_miter_start(&mi
, areq
->src
, sg_nents(areq
->src
),
200 SG_MITER_FROM_SG
| SG_MITER_ATOMIC
);
201 sg_miter_start(&mo
, areq
->dst
, sg_nents(areq
->dst
),
202 SG_MITER_TO_SG
| SG_MITER_ATOMIC
);
205 if (!mi
.addr
|| !mo
.addr
) {
206 dev_err_ratelimited(ss
->dev
, "ERROR: sg_miter return null\n");
210 ileft
= areq
->nbytes
;
211 oleft
= areq
->nbytes
;
218 * todo is the number of consecutive 4byte word that we
219 * can read from current SG
221 todo
= min3(rx_cnt
, ileft
/ 4, (mi
.length
- oi
) / 4);
222 if (todo
> 0 && ob
== 0) {
223 writesl(ss
->base
+ SS_RXFIFO
, mi
.addr
+ oi
,
229 * not enough consecutive bytes, so we need to
230 * linearize in buf. todo is in bytes
231 * After that copy, if we have a multiple of 4
232 * we need to be able to write all buf in one
233 * pass, so it is why we min() with rx_cnt
235 todo
= min3(rx_cnt
* 4 - ob
, ileft
,
237 memcpy(buf
+ ob
, mi
.addr
+ oi
, todo
);
242 writesl(ss
->base
+ SS_RXFIFO
, buf
,
247 if (oi
== mi
.length
) {
253 spaces
= readl(ss
->base
+ SS_FCSR
);
254 rx_cnt
= SS_RXFIFO_SPACES(spaces
);
255 tx_cnt
= SS_TXFIFO_SPACES(spaces
);
256 dev_dbg(ss
->dev
, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
258 oi
, mi
.length
, ileft
, areq
->nbytes
, rx_cnt
,
259 oo
, mo
.length
, oleft
, areq
->nbytes
, tx_cnt
, ob
);
263 /* todo in 4bytes word */
264 todo
= min3(tx_cnt
, oleft
/ 4, (mo
.length
- oo
) / 4);
266 readsl(ss
->base
+ SS_TXFIFO
, mo
.addr
+ oo
, todo
);
269 if (oo
== mo
.length
) {
275 * read obl bytes in bufo, we read at maximum for
276 * emptying the device
278 readsl(ss
->base
+ SS_TXFIFO
, bufo
, tx_cnt
);
283 * how many bytes we can copy ?
284 * no more than remaining SG size
285 * no more than remaining buffer
286 * no need to test against oleft
288 todo
= min(mo
.length
- oo
, obl
- obo
);
289 memcpy(mo
.addr
+ oo
, bufo
+ obo
, todo
);
293 if (oo
== mo
.length
) {
298 /* bufo must be fully used here */
302 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
303 v
= readl(ss
->base
+ SS_IV0
+ i
* 4);
304 *(u32
*)(areq
->info
+ i
* 4) = v
;
311 writel(0, ss
->base
+ SS_CTL
);
312 spin_unlock_irqrestore(&ss
->slock
, flags
);
318 int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request
*areq
)
320 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
321 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
322 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
324 rctx
->mode
= SS_OP_AES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
326 return sun4i_ss_cipher_poll(areq
);
329 int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request
*areq
)
331 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
332 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
333 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
335 rctx
->mode
= SS_OP_AES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
337 return sun4i_ss_cipher_poll(areq
);
341 int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request
*areq
)
343 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
344 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
345 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
347 rctx
->mode
= SS_OP_AES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
349 return sun4i_ss_cipher_poll(areq
);
352 int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request
*areq
)
354 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
355 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
356 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
358 rctx
->mode
= SS_OP_AES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
360 return sun4i_ss_cipher_poll(areq
);
364 int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request
*areq
)
366 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
367 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
368 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
370 rctx
->mode
= SS_OP_DES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
372 return sun4i_ss_cipher_poll(areq
);
375 int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request
*areq
)
377 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
378 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
379 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
381 rctx
->mode
= SS_OP_DES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
383 return sun4i_ss_cipher_poll(areq
);
387 int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request
*areq
)
389 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
390 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
391 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
393 rctx
->mode
= SS_OP_DES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
395 return sun4i_ss_cipher_poll(areq
);
398 int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request
*areq
)
400 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
401 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
402 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
404 rctx
->mode
= SS_OP_DES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
406 return sun4i_ss_cipher_poll(areq
);
410 int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request
*areq
)
412 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
413 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
414 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
416 rctx
->mode
= SS_OP_3DES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
418 return sun4i_ss_cipher_poll(areq
);
421 int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request
*areq
)
423 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
424 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
425 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
427 rctx
->mode
= SS_OP_3DES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
429 return sun4i_ss_cipher_poll(areq
);
433 int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request
*areq
)
435 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
436 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
437 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
439 rctx
->mode
= SS_OP_3DES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
441 return sun4i_ss_cipher_poll(areq
);
444 int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request
*areq
)
446 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(areq
);
447 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
448 struct sun4i_cipher_req_ctx
*rctx
= ablkcipher_request_ctx(areq
);
450 rctx
->mode
= SS_OP_3DES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
452 return sun4i_ss_cipher_poll(areq
);
455 int sun4i_ss_cipher_init(struct crypto_tfm
*tfm
)
457 struct sun4i_tfm_ctx
*op
= crypto_tfm_ctx(tfm
);
458 struct crypto_alg
*alg
= tfm
->__crt_alg
;
459 struct sun4i_ss_alg_template
*algt
;
461 memset(op
, 0, sizeof(struct sun4i_tfm_ctx
));
463 algt
= container_of(alg
, struct sun4i_ss_alg_template
, alg
.crypto
);
466 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct sun4i_cipher_req_ctx
);
471 /* check and set the AES key, prepare the mode to be used */
472 int sun4i_ss_aes_setkey(struct crypto_ablkcipher
*tfm
, const u8
*key
,
475 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
476 struct sun4i_ss_ctx
*ss
= op
->ss
;
480 op
->keymode
= SS_AES_128BITS
;
483 op
->keymode
= SS_AES_192BITS
;
486 op
->keymode
= SS_AES_256BITS
;
489 dev_err(ss
->dev
, "ERROR: Invalid keylen %u\n", keylen
);
490 crypto_ablkcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
494 memcpy(op
->key
, key
, keylen
);
498 /* check and set the DES key, prepare the mode to be used */
499 int sun4i_ss_des_setkey(struct crypto_ablkcipher
*tfm
, const u8
*key
,
502 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
503 struct sun4i_ss_ctx
*ss
= op
->ss
;
505 u32 tmp
[DES_EXPKEY_WORDS
];
508 if (unlikely(keylen
!= DES_KEY_SIZE
)) {
509 dev_err(ss
->dev
, "Invalid keylen %u\n", keylen
);
510 crypto_ablkcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
514 flags
= crypto_ablkcipher_get_flags(tfm
);
516 ret
= des_ekey(tmp
, key
);
517 if (unlikely(ret
== 0) && (flags
& CRYPTO_TFM_REQ_WEAK_KEY
)) {
518 crypto_ablkcipher_set_flags(tfm
, CRYPTO_TFM_RES_WEAK_KEY
);
519 dev_dbg(ss
->dev
, "Weak key %u\n", keylen
);
524 memcpy(op
->key
, key
, keylen
);
528 /* check and set the 3DES key, prepare the mode to be used */
529 int sun4i_ss_des3_setkey(struct crypto_ablkcipher
*tfm
, const u8
*key
,
532 struct sun4i_tfm_ctx
*op
= crypto_ablkcipher_ctx(tfm
);
533 struct sun4i_ss_ctx
*ss
= op
->ss
;
535 if (unlikely(keylen
!= 3 * DES_KEY_SIZE
)) {
536 dev_err(ss
->dev
, "Invalid keylen %u\n", keylen
);
537 crypto_ablkcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
541 memcpy(op
->key
, key
, keylen
);