/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
        SAFEXCEL_ENCRYPT,
        SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 mode;

        __le32 key[8];
        unsigned int key_len;
};

struct safexcel_cipher_req {
        enum safexcel_cipher_direction direction;
        bool needs_inv;
};

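/*
 * Build the processing token for a cipher request. In CBC mode the IV is
 * first copied into the command descriptor control data, then a single
 * DIRECTION instruction covering the whole payload is emitted.
 */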
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                  struct crypto_async_request *async,
                                  struct safexcel_command_desc *cdesc,
                                  u32 length)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_token *token;
        unsigned offset = 0;

        if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
                offset = AES_BLOCK_SIZE / sizeof(u32);
                memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

                cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
        }

        token = (struct safexcel_token *)(cdesc->control_data.token + offset);

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
        token[0].instructions = EIP197_TOKEN_INS_LAST |
                                EIP197_TOKEN_INS_TYPE_CRYTO |
                                EIP197_TOKEN_INS_TYPE_OUTPUT;
}

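/*
 * Expand and store the AES key. On EIP197, if a context record is already
 * live on the engine and the key material changed, flag the context for
 * invalidation so the cached record is not reused with the old key.
 */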
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
                               unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct crypto_aes_ctx aes;
        int ret, i;

        ret = crypto_aes_expand_key(&aes, key, len);
        if (ret) {
                crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        if (priv->version == EIP197 && ctx->base.ctxr_dma) {
                for (i = 0; i < len / sizeof(u32); i++) {
                        if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
                                ctx->base.needs_inv = true;
                                break;
                        }
                }
        }

        for (i = 0; i < len / sizeof(u32); i++)
                ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

        ctx->key_len = len;

        memzero_explicit(&aes, sizeof(aes));

        return 0;
}

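/*
 * Fill the command descriptor control words: crypto direction, key enable,
 * block mode and the AES variant matching the key length. ctrl_size is the
 * number of 32-bit context words occupied by the key.
 */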
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
                                    struct crypto_async_request *async,
                                    struct safexcel_command_desc *cdesc)
{
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        int ctrl_size;

        if (sreq->direction == SAFEXCEL_ENCRYPT)
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
        else
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
        cdesc->control_data.control1 |= ctx->mode;

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
                ctrl_size = 4;
                break;
        case AES_KEYSIZE_192:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
                ctrl_size = 6;
                break;
        case AES_KEYSIZE_256:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
                ctrl_size = 8;
                break;
        default:
                dev_err(priv->dev, "aes keysize not supported: %u\n",
                        ctx->key_len);
                return -EINVAL;
        }
        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

        return 0;
}

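/*
 * Collect the result descriptors of a completed cipher request, report any
 * descriptor error, then unmap the source/destination scatterlists.
 */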
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: result: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev,
                                "cipher: result: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        *should_complete = true;

        return ndesc;
}

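/*
 * Queue an AES request on the given ring: DMA-map the scatterlists, emit one
 * command descriptor per source segment (the first one also carrying the
 * context control data and token) and one result descriptor per destination
 * segment. On failure the ring write pointers are rolled back and the
 * scatterlists unmapped.
 */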
static int safexcel_aes_send(struct crypto_async_request *async,
                             int ring, struct safexcel_request *request,
                             int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
        int i, ret = 0;

        if (req->src == req->dst) {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_BIDIRECTIONAL);
                nr_dst = nr_src;
                if (!nr_src)
                        return -EINVAL;
        } else {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_TO_DEVICE);
                if (!nr_src)
                        return -EINVAL;

                nr_dst = dma_map_sg(priv->dev, req->dst,
                                    sg_nents_for_len(req->dst, req->cryptlen),
                                    DMA_FROM_DEVICE);
                if (!nr_dst) {
                        dma_unmap_sg(priv->dev, req->src,
                                     sg_nents_for_len(req->src, req->cryptlen),
                                     DMA_TO_DEVICE);
                        return -EINVAL;
                }
        }

        memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* command descriptors */
        for_each_sg(req->src, sg, nr_src, i) {
                int len = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - len < 0)
                        len = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
                                           sg_dma_address(sg), len, req->cryptlen,
                                           ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        /* No space left in the command descriptor ring */
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1) {
                        safexcel_context_control(ctx, async, cdesc);
                        safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
                }

                queued -= len;
                if (!queued)
                        break;
        }

        /* result descriptors */
        for_each_sg(req->dst, sg, nr_dst, i) {
                bool first = !i, last = (i == nr_dst - 1);
                u32 len = sg_dma_len(sg);

                rdesc = safexcel_add_rdesc(priv, ring, first, last,
                                           sg_dma_address(sg), len);
                if (IS_ERR(rdesc)) {
                        /* No space left in the result descriptor ring */
                        ret = PTR_ERR(rdesc);
                        goto rdesc_rollback;
                }
                n_rdesc++;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        request->req = &req->base;

        *commands = n_cdesc;
        *results = n_rdesc;
        return 0;

rdesc_rollback:
        for (i = 0; i < n_rdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        return ret;
}

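/*
 * Handle the result of a context invalidation request. When the tfm is being
 * torn down the context record is freed; otherwise the original request is
 * re-enqueued on a (possibly different) ring.
 */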
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0, enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: invalidate: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;

                return ndesc;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        *should_complete = false;

        return ndesc;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        int err;

        if (sreq->needs_inv) {
                sreq->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

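/*
 * Emit a single command/result descriptor pair that invalidates the context
 * record cached by the engine.
 */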
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        ret = safexcel_invalidate_cache(async, priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

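/*
 * Dispatch either an invalidation request or a regular AES request.
 * Invalidation only exists on EIP197, hence the BUG_ON for EIP97.
 */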
static int safexcel_send(struct crypto_async_request *async,
                         int ring, struct safexcel_request *request,
                         int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        BUG_ON(priv->version == EIP97 && sreq->needs_inv);

        if (sreq->needs_inv)
                ret = safexcel_cipher_send_inv(async, ring, request,
                                               commands, results);
        else
                ret = safexcel_aes_send(async, ring, request,
                                        commands, results);

        return ret;
}

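/*
 * Synchronously invalidate the context record: build an invalidation request
 * on the stack, queue it to the context's ring and wait for its completion.
 */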
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct skcipher_request));

        /* create invalidation request */
        init_completion(&result.completion);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      safexcel_inv_complete, &result);

        skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        sreq->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev,
                         "cipher: sync: invalidate: completion error %d\n",
                         result.error);
                return result.error;
        }

        return 0;
}

static int safexcel_aes(struct skcipher_request *req,
                        enum safexcel_cipher_direction dir, u32 mode)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        sreq->needs_inv = false;
        sreq->direction = dir;
        ctx->mode = mode;

        if (ctx->base.ctxr) {
                if (priv->version == EIP197 && ctx->base.needs_inv) {
                        sreq->needs_inv = true;
                        ctx->base.needs_inv = false;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(req->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(tfm->__crt_alg, struct safexcel_alg_template,
                             alg.skcipher.base);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct safexcel_cipher_req));

        return 0;
}

static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        memzero_explicit(ctx->key, 8 * sizeof(u32));

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

        if (priv->version == EIP197) {
                ret = safexcel_cipher_exit_inv(tfm);
                if (ret)
                        dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
        } else {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);
        }
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_ecb_aes_encrypt,
                .decrypt = safexcel_ecb_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "safexcel-ecb-aes",
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_cbc_aes_encrypt,
                .decrypt = safexcel_cbc_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "safexcel-cbc-aes",
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};