/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
19 static int sun4i_ss_opti_poll(struct skcipher_request
*areq
)
21 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
22 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
23 struct sun4i_ss_ctx
*ss
= op
->ss
;
24 unsigned int ivsize
= crypto_skcipher_ivsize(tfm
);
25 struct sun4i_cipher_req_ctx
*ctx
= skcipher_request_ctx(areq
);
27 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
28 u32 rx_cnt
= SS_RX_DEFAULT
;
34 unsigned int ileft
= areq
->cryptlen
;
35 unsigned int oleft
= areq
->cryptlen
;
37 struct sg_mapping_iter mi
, mo
;
38 unsigned int oi
, oo
; /* offset for in and out */
45 dev_err_ratelimited(ss
->dev
, "ERROR: Empty IV\n");
49 if (!areq
->src
|| !areq
->dst
) {
50 dev_err_ratelimited(ss
->dev
, "ERROR: Some SGs are NULL\n");
54 spin_lock_irqsave(&ss
->slock
, flags
);
56 for (i
= 0; i
< op
->keylen
; i
+= 4)
57 writel(*(op
->key
+ i
/ 4), ss
->base
+ SS_KEY0
+ i
);
60 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
61 v
= *(u32
*)(areq
->iv
+ i
* 4);
62 writel(v
, ss
->base
+ SS_IV0
+ i
* 4);
65 writel(mode
, ss
->base
+ SS_CTL
);
67 sg_miter_start(&mi
, areq
->src
, sg_nents(areq
->src
),
68 SG_MITER_FROM_SG
| SG_MITER_ATOMIC
);
69 sg_miter_start(&mo
, areq
->dst
, sg_nents(areq
->dst
),
70 SG_MITER_TO_SG
| SG_MITER_ATOMIC
);
73 if (!mi
.addr
|| !mo
.addr
) {
74 dev_err_ratelimited(ss
->dev
, "ERROR: sg_miter return null\n");
79 ileft
= areq
->cryptlen
/ 4;
80 oleft
= areq
->cryptlen
/ 4;
84 todo
= min3(rx_cnt
, ileft
, (mi
.length
- oi
) / 4);
87 writesl(ss
->base
+ SS_RXFIFO
, mi
.addr
+ oi
, todo
);
90 if (oi
== mi
.length
) {
95 spaces
= readl(ss
->base
+ SS_FCSR
);
96 rx_cnt
= SS_RXFIFO_SPACES(spaces
);
97 tx_cnt
= SS_TXFIFO_SPACES(spaces
);
99 todo
= min3(tx_cnt
, oleft
, (mo
.length
- oo
) / 4);
102 readsl(ss
->base
+ SS_TXFIFO
, mo
.addr
+ oo
, todo
);
105 if (oo
== mo
.length
) {
112 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
113 v
= readl(ss
->base
+ SS_IV0
+ i
* 4);
114 *(u32
*)(areq
->iv
+ i
* 4) = v
;
121 writel(0, ss
->base
+ SS_CTL
);
122 spin_unlock_irqrestore(&ss
->slock
, flags
);
126 /* Generic function that support SG with size not multiple of 4 */
127 static int sun4i_ss_cipher_poll(struct skcipher_request
*areq
)
129 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
130 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
131 struct sun4i_ss_ctx
*ss
= op
->ss
;
133 struct scatterlist
*in_sg
= areq
->src
;
134 struct scatterlist
*out_sg
= areq
->dst
;
135 unsigned int ivsize
= crypto_skcipher_ivsize(tfm
);
136 struct sun4i_cipher_req_ctx
*ctx
= skcipher_request_ctx(areq
);
137 u32 mode
= ctx
->mode
;
138 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
139 u32 rx_cnt
= SS_RX_DEFAULT
;
145 unsigned int ileft
= areq
->cryptlen
;
146 unsigned int oleft
= areq
->cryptlen
;
148 struct sg_mapping_iter mi
, mo
;
149 unsigned int oi
, oo
; /* offset for in and out */
150 char buf
[4 * SS_RX_MAX
];/* buffer for linearize SG src */
151 char bufo
[4 * SS_TX_MAX
]; /* buffer for linearize SG dst */
152 unsigned int ob
= 0; /* offset in buf */
153 unsigned int obo
= 0; /* offset in bufo*/
154 unsigned int obl
= 0; /* length of data in bufo */
161 dev_err_ratelimited(ss
->dev
, "ERROR: Empty IV\n");
165 if (!areq
->src
|| !areq
->dst
) {
166 dev_err_ratelimited(ss
->dev
, "ERROR: Some SGs are NULL\n");
171 * if we have only SGs with size multiple of 4,
172 * we can use the SS optimized function
174 while (in_sg
&& no_chunk
== 1) {
175 if (in_sg
->length
% 4)
177 in_sg
= sg_next(in_sg
);
179 while (out_sg
&& no_chunk
== 1) {
180 if (out_sg
->length
% 4)
182 out_sg
= sg_next(out_sg
);
186 return sun4i_ss_opti_poll(areq
);
188 spin_lock_irqsave(&ss
->slock
, flags
);
190 for (i
= 0; i
< op
->keylen
; i
+= 4)
191 writel(*(op
->key
+ i
/ 4), ss
->base
+ SS_KEY0
+ i
);
194 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
195 v
= *(u32
*)(areq
->iv
+ i
* 4);
196 writel(v
, ss
->base
+ SS_IV0
+ i
* 4);
199 writel(mode
, ss
->base
+ SS_CTL
);
201 sg_miter_start(&mi
, areq
->src
, sg_nents(areq
->src
),
202 SG_MITER_FROM_SG
| SG_MITER_ATOMIC
);
203 sg_miter_start(&mo
, areq
->dst
, sg_nents(areq
->dst
),
204 SG_MITER_TO_SG
| SG_MITER_ATOMIC
);
207 if (!mi
.addr
|| !mo
.addr
) {
208 dev_err_ratelimited(ss
->dev
, "ERROR: sg_miter return null\n");
212 ileft
= areq
->cryptlen
;
213 oleft
= areq
->cryptlen
;
220 * todo is the number of consecutive 4byte word that we
221 * can read from current SG
223 todo
= min3(rx_cnt
, ileft
/ 4, (mi
.length
- oi
) / 4);
225 writesl(ss
->base
+ SS_RXFIFO
, mi
.addr
+ oi
,
231 * not enough consecutive bytes, so we need to
232 * linearize in buf. todo is in bytes
233 * After that copy, if we have a multiple of 4
234 * we need to be able to write all buf in one
235 * pass, so it is why we min() with rx_cnt
237 todo
= min3(rx_cnt
* 4 - ob
, ileft
,
239 memcpy(buf
+ ob
, mi
.addr
+ oi
, todo
);
244 writesl(ss
->base
+ SS_RXFIFO
, buf
,
249 if (oi
== mi
.length
) {
255 spaces
= readl(ss
->base
+ SS_FCSR
);
256 rx_cnt
= SS_RXFIFO_SPACES(spaces
);
257 tx_cnt
= SS_TXFIFO_SPACES(spaces
);
258 dev_dbg(ss
->dev
, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
260 oi
, mi
.length
, ileft
, areq
->cryptlen
, rx_cnt
,
261 oo
, mo
.length
, oleft
, areq
->cryptlen
, tx_cnt
, ob
);
265 /* todo in 4bytes word */
266 todo
= min3(tx_cnt
, oleft
/ 4, (mo
.length
- oo
) / 4);
268 readsl(ss
->base
+ SS_TXFIFO
, mo
.addr
+ oo
, todo
);
271 if (oo
== mo
.length
) {
277 * read obl bytes in bufo, we read at maximum for
278 * emptying the device
280 readsl(ss
->base
+ SS_TXFIFO
, bufo
, tx_cnt
);
285 * how many bytes we can copy ?
286 * no more than remaining SG size
287 * no more than remaining buffer
288 * no need to test against oleft
290 todo
= min(mo
.length
- oo
, obl
- obo
);
291 memcpy(mo
.addr
+ oo
, bufo
+ obo
, todo
);
295 if (oo
== mo
.length
) {
300 /* bufo must be fully used here */
304 for (i
= 0; i
< 4 && i
< ivsize
/ 4; i
++) {
305 v
= readl(ss
->base
+ SS_IV0
+ i
* 4);
306 *(u32
*)(areq
->iv
+ i
* 4) = v
;
313 writel(0, ss
->base
+ SS_CTL
);
314 spin_unlock_irqrestore(&ss
->slock
, flags
);
320 int sun4i_ss_cbc_aes_encrypt(struct skcipher_request
*areq
)
322 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
323 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
324 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
326 rctx
->mode
= SS_OP_AES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
328 return sun4i_ss_cipher_poll(areq
);
331 int sun4i_ss_cbc_aes_decrypt(struct skcipher_request
*areq
)
333 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
334 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
335 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
337 rctx
->mode
= SS_OP_AES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
339 return sun4i_ss_cipher_poll(areq
);
343 int sun4i_ss_ecb_aes_encrypt(struct skcipher_request
*areq
)
345 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
346 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
347 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
349 rctx
->mode
= SS_OP_AES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
351 return sun4i_ss_cipher_poll(areq
);
354 int sun4i_ss_ecb_aes_decrypt(struct skcipher_request
*areq
)
356 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
357 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
358 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
360 rctx
->mode
= SS_OP_AES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
362 return sun4i_ss_cipher_poll(areq
);
366 int sun4i_ss_cbc_des_encrypt(struct skcipher_request
*areq
)
368 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
369 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
370 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
372 rctx
->mode
= SS_OP_DES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
374 return sun4i_ss_cipher_poll(areq
);
377 int sun4i_ss_cbc_des_decrypt(struct skcipher_request
*areq
)
379 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
380 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
381 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
383 rctx
->mode
= SS_OP_DES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
385 return sun4i_ss_cipher_poll(areq
);
389 int sun4i_ss_ecb_des_encrypt(struct skcipher_request
*areq
)
391 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
392 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
393 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
395 rctx
->mode
= SS_OP_DES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
397 return sun4i_ss_cipher_poll(areq
);
400 int sun4i_ss_ecb_des_decrypt(struct skcipher_request
*areq
)
402 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
403 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
404 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
406 rctx
->mode
= SS_OP_DES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
408 return sun4i_ss_cipher_poll(areq
);
412 int sun4i_ss_cbc_des3_encrypt(struct skcipher_request
*areq
)
414 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
415 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
416 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
418 rctx
->mode
= SS_OP_3DES
| SS_CBC
| SS_ENABLED
| SS_ENCRYPTION
|
420 return sun4i_ss_cipher_poll(areq
);
423 int sun4i_ss_cbc_des3_decrypt(struct skcipher_request
*areq
)
425 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
426 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
427 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
429 rctx
->mode
= SS_OP_3DES
| SS_CBC
| SS_ENABLED
| SS_DECRYPTION
|
431 return sun4i_ss_cipher_poll(areq
);
435 int sun4i_ss_ecb_des3_encrypt(struct skcipher_request
*areq
)
437 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
438 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
439 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
441 rctx
->mode
= SS_OP_3DES
| SS_ECB
| SS_ENABLED
| SS_ENCRYPTION
|
443 return sun4i_ss_cipher_poll(areq
);
446 int sun4i_ss_ecb_des3_decrypt(struct skcipher_request
*areq
)
448 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(areq
);
449 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
450 struct sun4i_cipher_req_ctx
*rctx
= skcipher_request_ctx(areq
);
452 rctx
->mode
= SS_OP_3DES
| SS_ECB
| SS_ENABLED
| SS_DECRYPTION
|
454 return sun4i_ss_cipher_poll(areq
);
457 int sun4i_ss_cipher_init(struct crypto_tfm
*tfm
)
459 struct sun4i_tfm_ctx
*op
= crypto_tfm_ctx(tfm
);
460 struct sun4i_ss_alg_template
*algt
;
462 memset(op
, 0, sizeof(struct sun4i_tfm_ctx
));
464 algt
= container_of(tfm
->__crt_alg
, struct sun4i_ss_alg_template
,
468 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm
),
469 sizeof(struct sun4i_cipher_req_ctx
));
474 /* check and set the AES key, prepare the mode to be used */
475 int sun4i_ss_aes_setkey(struct crypto_skcipher
*tfm
, const u8
*key
,
478 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
479 struct sun4i_ss_ctx
*ss
= op
->ss
;
483 op
->keymode
= SS_AES_128BITS
;
486 op
->keymode
= SS_AES_192BITS
;
489 op
->keymode
= SS_AES_256BITS
;
492 dev_err(ss
->dev
, "ERROR: Invalid keylen %u\n", keylen
);
493 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
497 memcpy(op
->key
, key
, keylen
);
501 /* check and set the DES key, prepare the mode to be used */
502 int sun4i_ss_des_setkey(struct crypto_skcipher
*tfm
, const u8
*key
,
505 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
506 struct sun4i_ss_ctx
*ss
= op
->ss
;
508 u32 tmp
[DES_EXPKEY_WORDS
];
511 if (unlikely(keylen
!= DES_KEY_SIZE
)) {
512 dev_err(ss
->dev
, "Invalid keylen %u\n", keylen
);
513 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
517 flags
= crypto_skcipher_get_flags(tfm
);
519 ret
= des_ekey(tmp
, key
);
520 if (unlikely(!ret
) && (flags
& CRYPTO_TFM_REQ_WEAK_KEY
)) {
521 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_WEAK_KEY
);
522 dev_dbg(ss
->dev
, "Weak key %u\n", keylen
);
527 memcpy(op
->key
, key
, keylen
);
531 /* check and set the 3DES key, prepare the mode to be used */
532 int sun4i_ss_des3_setkey(struct crypto_skcipher
*tfm
, const u8
*key
,
535 struct sun4i_tfm_ctx
*op
= crypto_skcipher_ctx(tfm
);
536 struct sun4i_ss_ctx
*ss
= op
->ss
;
538 if (unlikely(keylen
!= 3 * DES_KEY_SIZE
)) {
539 dev_err(ss
->dev
, "Invalid keylen %u\n", keylen
);
540 crypto_skcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
544 memcpy(op
->key
, key
, keylen
);