// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M
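
/*
 * Requests larger than SEC_REQ_LIMIT (32 MiB) are split into multiple
 * elements of at most that size before being handed to the hardware;
 * see sec_alg_alloc_and_calc_split_sizes() below.
 */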
struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};
static const struct sec_c_alg_cfg sec_c_alg_cfgs[] =  {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};
/*
 * Mutex used to ensure safe operation of reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
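
/*
 * Pre-populate a buffer descriptor (BD) template for this tfm: cipher
 * mode, algorithm, key length and the DMA address of the key are fixed
 * here, so building each request only needs a memcpy of the template.
 */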
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}
static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}
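
/*
 * Hardware scatter-gather lists are allocated from a dma_pool and
 * chained via next/next_sgl; walk the chain and return each element
 * to the pool.
 */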
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   GFP_KERNEL, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}
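
/*
 * The key lives in a DMA-coherent buffer so the template BD can carry
 * its bus address. On first use the buffer is allocated; on rekey the
 * old key material is cleared before the new key is copied in.
 */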
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}
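
/*
 * The AES setkey helpers below only translate the supplied key length
 * into the matching sec_cipher_alg index before deferring to
 * sec_alg_skcipher_setkey(); unsupported lengths are rejected with
 * -EINVAL.
 */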
static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}
static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}
static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}
static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}
static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}
static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}
/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}

err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}
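
/*
 * Per-BD completion handler. For chained modes it first propagates the
 * IV (last ciphertext block for CBC, counter increment for CTR), then
 * drains the software queue or kicks a backlogged request while the
 * queuelock is held, frees the finished element, and finally completes
 * the skcipher request once its element list is empty.
 */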
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * which should be able to handle it appropriately.
		 */
	}
	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}
	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);
	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}
void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}
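
/*
 * Example: a cryptlen of 70 MiB gives steps = 3 and split sizes of
 * 32 MiB, 32 MiB and 6 MiB, since each element is capped at
 * SEC_REQ_LIMIT.
 */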
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}
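
/*
 * Map the whole scatterlist for DMA, then use sg_split() to carve it
 * into per-element scatterlists matching the sizes calculated above.
 */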
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}
/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}
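
/*
 * Core request path: map and split the source (and, for out-of-place
 * operation, the destination) scatterlists, build one request element
 * per split, then either queue the whole set atomically or back the
 * request off to the per-tfm backlog.
 */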
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in seq_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_BIDIRECTIONAL);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}
static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}
static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}
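
/*
 * Each tfm gets its own hardware queue. The chaining modes (CBC, CTR)
 * use the _with_queue variants below, which additionally allocate a
 * software kfifo so dependent elements can be ordered behind whatever
 * is already on the hardware queue.
 */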
static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}
static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}

	sec_queue_stop_release(ctx->queue);
}
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}
static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
	/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	},
};
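
/*
 * The algorithms are registered once for the first device and
 * unregistered when the last device goes away; active_devs, protected
 * by algs_lock, tracks how many devices are using them.
 */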
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}
void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}