drivers/crypto/inside-secure/safexcel_cipher.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	enum safexcel_cipher_direction direction;
	bool needs_inv;
};

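/*
 * Build the instruction token carried by the first command descriptor:
 * for CBC the IV is copied into the descriptor's token area first, then a
 * single "direction" instruction covering the whole request length is
 * emitted.
 */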
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}

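/*
 * Expand and cache the AES key. On EIP197, a context record already
 * programmed into the engine must be invalidated when the key changes,
 * so the new key is compared against the cached one and an invalidation
 * is flagged if they differ.
 */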
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->version == EIP197 && ctx->base.ctxr_dma) {
		for (i = 0; i < len / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

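/*
 * Fill the control words of the first command descriptor: crypto
 * direction (in/out), cipher mode and a context size matching the AES
 * key length, expressed in 32-bit words.
 */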
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct crypto_async_request *async,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ctrl_size;

	if (sreq->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}
	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}

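/*
 * Called from the ring result handler: walk the result descriptors of a
 * completed cipher request, report any descriptor error and DMA-unmap
 * the source/destination scatterlists.
 */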
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}

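/*
 * Queue a cipher request to the engine: DMA-map source and destination,
 * copy the key into the context record, then emit one command descriptor
 * per source segment and one result descriptor per destination segment.
 * If a ring runs out of space, every descriptor written so far is rolled
 * back and the scatterlists are unmapped.
 */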
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, async, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}

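/*
 * Completion of a context invalidation request. If the tfm is being torn
 * down (exit_inv), free the context record; otherwise the original
 * request, which was diverted to perform the invalidation, is re-queued
 * on a freshly selected ring.
 */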
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return ndesc;
}

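/* Dispatch a completed descriptor chain to the invalidation or cipher handler. */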
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

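/* Emit the single command/result descriptor pair that invalidates the cached context record. */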
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

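/*
 * Send path entry point. Invalidation requests are only ever generated on
 * EIP197, so one must never reach an EIP97 (hence the BUG_ON).
 */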
static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	BUG_ON(priv->version == EIP97 && sreq->needs_inv);

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);
	return ret;
}

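/*
 * Synchronously invalidate the context record cached by the engine: build
 * an on-stack skcipher request flagged needs_inv/exit_inv, queue it on the
 * tfm's ring and wait for its completion callback.
 */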
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}

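/*
 * Common encrypt/decrypt entry point: lazily allocate the per-tfm context
 * record, turn a pending key change into an invalidation request when
 * needed (EIP197 only), and queue the request on the tfm's ring.
 */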
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	sreq->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 && ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

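/*
 * Per-tfm initialisation: hook up the send/result handlers and reserve
 * room for struct safexcel_cipher_req in every skcipher request.
 */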
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}

static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	if (priv->version == EIP197) {
		ret = safexcel_cipher_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

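/*
 * skcipher algorithm templates exposed by this file; they are registered
 * with the crypto API by the driver core in safexcel.c.
 */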
struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};