/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF
#define MV_CESA_EXPIRE		500 /* msec */

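/*
 * Note: MAX_HW_HASH_SIZE bounds how much of a hash request the hardware is
 * asked to process; longer hashes are finished via the software fallback
 * (see mv_hash_final_fallback() below). The 0xFFFF limit presumably matches
 * the width of the engine's MAC total-length field.
 */
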
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct clk *clk;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct timer_list completion_timer;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

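/*
 * cpg is the single global engine context: there is one CESA instance, the
 * hardware processes one request at a time, and everything else waits on the
 * crypto queue according to the state machine drawn above.
 */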
static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
};

static void mv_completion_timer_callback(unsigned long unused)
{
	int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;

	printk(KERN_ERR MV_CESA
	       "completion timer expired (CESA %sactive), cleaning up.\n",
	       active ? "" : "in");

	del_timer(&cpg->completion_timer);
	writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
	while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
		printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n",
		       __func__);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
}

static void mv_setup_timer(void)
{
	setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
	mod_timer(&cpg->completion_timer,
		  jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
}

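/*
 * For decryption the engine is handed the tail of the expanded encryption
 * key schedule (the "decryption key" in hardware terms). It is computed
 * lazily: setkey only records the raw key, and the schedule is expanded
 * here the first time a decrypt request needs it.
 */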
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* copy only the supplied key bytes; the engine uses key_len of them */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}

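/*
 * copy_src_to_buf() linearizes up to 'len' bytes from the source scatterlist
 * into 'dbuf' (either the engine's SRAM or the partial-block buffer),
 * advancing the sg mapping iterator across entries as needed.
 */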
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copy_len;

	while (len) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		copy_len = min(p->sg_src_left, len);
		memcpy(dbuf, sbuf, copy_len);

		p->src_start += copy_len;
		p->sg_src_left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;
	}
}

static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}

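/*
 * Program one cipher step: build a sec_accel_config describing the AES
 * operation, stage key/IV/data in SRAM, then kick the accelerator. The
 * completion interrupt (or the watchdog timer above) moves the state
 * machine on.
 */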
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
	       sizeof(struct sec_accel_config));

	/* GO */
	mv_setup_timer();
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

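/*
 * Program one hash step. The engine distinguishes first/middle/last/only
 * fragments; the flags are chosen from first_hash/last_chunk and from how
 * much of the request has gone through the SRAM so far. For continued
 * hashes the previously saved digest state is reloaded into the
 * initial-value registers.
 */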
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
		       tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
		break;
	}

	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest =
		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv =
		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;

		if (first_block) {
			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
		}
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO */
	mv_setup_timer();
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

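/*
 * Finish a hash in software. This runs in two situations: the request never
 * reached a full hardware block (first_hash with only buffered bytes), or
 * the total length exceeded MAX_HW_HASH_SIZE and the hardware digest state
 * was saved into the request context to be imported into the shash fallback.
 */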
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
	int rc;

	shash->tfm = tfm_ctx->fallback;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(shash);
		crypto_shash_update(shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now.... */
		rc = mv_hash_import_sha1_ctx(req_ctx, shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(shash, req->result);
out:
	return rc;
}

static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
{
	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
}

static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else {
			mv_save_digest_state(ctx);
			mv_hash_final_fallback(req);
		}
	} else {
		mv_save_digest_state(ctx);
	}
}

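/*
 * Called from the queue thread once the hardware is done with a chunk.
 * For ciphers the output is copied from SRAM back into the destination
 * scatterlist; then either the next chunk is started or the request is
 * completed and the engine returns to IDLE.
 */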
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

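/*
 * Begin servicing a cipher request that the queue thread just dequeued:
 * reset the progress state, map the src/dst scatterlists and push the
 * first chunk into the engine.
 */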
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

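/*
 * Begin servicing a hash request. Whole SHA1 blocks go to the hardware;
 * a trailing partial block is kept in ctx->buffer for the next update (or
 * for the final fallback). If nothing is left for the hardware, the request
 * completes right here, possibly via the software fallback.
 */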
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	int num_sgs, hw_bytes, old_extra_bytes, rc;
	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

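/*
 * The queue thread implements the state machine drawn at the top of this
 * file: it dequeues completed chunks, pulls the next request off the crypto
 * queue when the engine is IDLE, dispatches it as either a cipher or a hash
 * request, and then sleeps until the interrupt (or timer) wakes it again.
 */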
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		/* initialize: 'backlog' is only assigned when the engine is idle */
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) !=
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	ahash_request_set_crypt(req, NULL, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}

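/*
 * HMAC support: the engine takes precomputed inner and outer IVs, i.e. the
 * digest state after hashing (key ^ ipad) and (key ^ opad) respectively.
 * mv_hash_init_ivs() stores those states big-endian in the tfm context;
 * mv_hash_setkey() computes them with a software shash, mirroring what the
 * generic hmac template does.
 */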
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;
	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}

static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm
	   so I'm basically copying code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		SHASH_DESC_ON_STACK(shash, ctx->base_hash);

		unsigned int i;
		char ipad[ss];
		char opad[ss];

		shash->tfm = ctx->base_hash;
		shash->flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(shash, key, keylen, ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(shash) ? :
		    crypto_shash_update(shash, ipad, bs) ? :
		    crypto_shash_export(shash, ipad) ? :
		    crypto_shash_init(shash) ? :
		    crypto_shash_update(shash, opad, bs) ? :
		    crypto_shash_export(shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	if (!del_timer(&cpg->completion_timer)) {
		printk(KERN_WARNING MV_CESA
		       "got an interrupt but no pending timer?\n");
	}
	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

static struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

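/*
 * Consumers reach these algorithms through the regular crypto API by name,
 * e.g. (sketch, not part of this driver):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 * With a priority of 300, "mv-sha1", "mv-ecb-aes" etc. win over the generic
 * software implementations once this driver is bound.
 */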
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	if (pdev->dev.of_node)
		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	else
		irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
			  cp);
	if (ret)
		goto err_thread;

	/* Not all platforms can gate the clock, so it is not
	   an error if the clock does not exist. */
	cp->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(cp->clk))
		clk_prepare_enable(cp->clk);

	writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
	if (!IS_ERR(cp->clk)) {
		clk_disable_unprepare(cp->clk);
		clk_put(cp->clk);
	}
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);

	if (!IS_ERR(cp->clk)) {
		clk_disable_unprepare(cp->clk);
		clk_put(cp->clk);
	}

	kfree(cp);
	cpg = NULL;
	return 0;
}

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.name = "mv_crypto",
		.of_match_table = mv_cesa_of_match_table,
	},
};
MODULE_ALIAS("platform:mv_crypto");

module_platform_driver(marvell_crypto);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");