// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M
struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};
static const struct sec_c_alg_cfg sec_c_alg_cfgs[] =  {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};
/*
 * Mutex used to ensure safe operation of reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
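/*
 * Fill a template BD (backend descriptor) for this transform: cipher
 * algorithm, mode, key length and the DMA address of the key. Each request
 * element later starts from a copy of this template.
 */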
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}
static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}
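/*
 * Build a chain of hardware scatter gather lists from a DMA mapped
 * scatterlist. A fresh hardware SGL is taken from the DMA pool every
 * SEC_MAX_SGE_NUM entries and linked to the previous one through its
 * next / next_sgl fields.
 */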
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info,
				     gfp_t gfp)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}
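/*
 * The per-mode setkey helpers below map the supplied key length onto the
 * matching hardware algorithm variant before handing off to
 * sec_alg_skcipher_setkey().
 */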
static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
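/* XTS takes two AES keys concatenated, hence the doubled key sizes. */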
static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}
static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el);
}
/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain ordering)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}

err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}
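/*
 * Completion callback, run once per finished element (BD) of a request.
 * It updates the IV for chained modes, feeds the software queue or the
 * backlog, and completes the skcipher request when its last element is done.
 */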
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester
		 * so it should be able to handle appropriately.
		 */
	}
	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}
	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);
	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion
	 * callback.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}
void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps, gfp_t gfp)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev, gfp_t gfp)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}
/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info, gfp_t gfp)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}
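/*
 * Main crypto entry point: split the request, DMA map the buffers, build
 * one element per chunk, then queue them atomically - to hardware, to the
 * software queue, or (if the caller allows) to the backlog.
 */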
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in seq_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully cleanup after a partial queuing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}
static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}
static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}
static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}

	sec_queue_stop_release(ctx->queue);
}
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}
static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}
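/*
 * Note the chaining modes (CBC, CTR) use the software-queue variants of
 * init/exit so that IV dependencies between split elements are preserved;
 * ECB and XTS have no such dependency and use the plain variants.
 */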
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};
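/*
 * Algorithms are registered once, when the first device appears, and
 * unregistered when the last one goes away; active_devs holds the
 * reference count under algs_lock.
 */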
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}
void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}