/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"
#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *  /°\                               |
 *   |                                | more scatter entries
 *   \________________________________/
 */
enum engine_status {
        ENGINE_IDLE,
        ENGINE_BUSY,
        ENGINE_W_DEQUEUE,
};
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;
        void (*complete) (void);
        void (*process) (int is_first);

        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
        int hw_nbytes;
        /* dst mostly */
        int copy_back;
        int sg_dst_left;
        int dst_start;
        int hw_processed_bytes;
};
struct crypto_priv {
        void __iomem *reg;
        void __iomem *sram;
        int irq;
        struct task_struct *queue_th;

        /* the lock protects queue and eng_st */
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
        struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
        int has_sha1;
        int has_hmac_sha1;
};
static struct crypto_priv *cpg;
struct mv_ctx {
        u8 aes_enc_key[AES_KEY_LEN];
        u32 aes_dec_key[8];
        int key_len;
        u32 need_calc_aes_dkey;
};
enum crypto_op {
        COP_AES_ECB,
        COP_AES_CBC,
};
struct mv_req_ctx {
        enum crypto_op op;
        int decrypt;
};
enum hash_op {
        COP_SHA1,
        COP_HMAC_SHA1
};
struct mv_tfm_hash_ctx {
        struct crypto_shash *fallback;
        struct crypto_shash *base_hash;
        u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
        int count_add;
        enum hash_op op;
};
struct mv_req_hash_ctx {
        u64 count;
        u32 state[SHA1_DIGEST_SIZE / 4];
        u8 buffer[SHA1_BLOCK_SIZE];
        int first_hash;		/* marks that we don't have previous state */
        int last_chunk;		/* marks that this is the 'final' request */
        int extra_bytes;	/* unprocessed bytes in buffer */
        enum hash_op op;
        int count_add;
};
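/*
 * The engine is handed a dedicated key when CFG_DIR_DEC is set, so the
 * decryption key is derived here from the tail of the expanded encryption
 * schedule (AES decryption starts from the last round keys); presumably
 * the hardware only implements the forward key schedule.
 */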
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
        struct crypto_aes_ctx gen_aes_key;
        int key_pos;

        if (!ctx->need_calc_aes_dkey)
                return;

        crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

        key_pos = ctx->key_len + 24;
        memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
        switch (ctx->key_len) {
        case AES_KEYSIZE_256:
                key_pos -= 2;
                /* fall through */
        case AES_KEYSIZE_192:
                key_pos -= 2;
                memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
                       4 * 4);
                break;
        }
        ctx->need_calc_aes_dkey = 0;
}
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
                         unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

        switch (len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                break;
        default:
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->key_len = len;
        ctx->need_calc_aes_dkey = 1;

        memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
        return 0;
}
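/*
 * Stream @len bytes from the source scatterlist into the linear buffer
 * @dbuf, resuming mid-entry across calls via sg_src_left/src_start in @p.
 */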
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
        int ret;
        void *sbuf;
        int copy_len;

        while (len) {
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
                        p->sg_src_left = p->src_sg_it.length;
                        p->src_start = 0;
                }

                sbuf = p->src_sg_it.addr + p->src_start;

                copy_len = min(p->sg_src_left, len);
                memcpy(dbuf, sbuf, copy_len);

                p->src_start += copy_len;
                p->sg_src_left -= copy_len;

                len -= copy_len;
                dbuf += copy_len;
        }
}
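/*
 * Fill the SRAM input window with as much of the remaining request as fits
 * (bounded by max_req_size). crypt_len may already be non-zero when
 * previously buffered hash bytes were copied in beforehand, so only the
 * difference is fetched from the scatterlist.
 */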
static void setup_data_in(void)
{
        struct req_progress *p = &cpg->p;
        int data_in_sram =
            min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);

        copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
                        data_in_sram - p->crypt_len);
        p->crypt_len = data_in_sram;
}
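/*
 * Program one AES chunk: build a sec_accel_config describing the SRAM
 * layout (key, IV, data in/out), fill the data window and start the
 * accelerator. Completion is signalled through the interrupt.
 */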
static void mv_process_current_q(int first_block)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;

        switch (req_ctx->op) {
        case COP_AES_ECB:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
        default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
                if (first_block)
                        memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
                break;
        }
        if (req_ctx->decrypt) {
                op.config |= CFG_DIR_DEC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
                       AES_KEY_LEN);
        } else {
                op.config |= CFG_DIR_ENC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
                       AES_KEY_LEN);
        }

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                op.config |= CFG_AES_LEN_128;
                break;
        case AES_KEYSIZE_192:
                op.config |= CFG_AES_LEN_192;
                break;
        case AES_KEYSIZE_256:
                op.config |= CFG_AES_LEN_256;
                break;
        }
        op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;

        setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
               sizeof(struct sec_accel_config));

        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

        /*
         * XXX: add timer if the interrupt does not occur for some mystery
         * reason
         */
}
static void mv_crypto_algo_completion(void)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        sg_miter_stop(&cpg->p.src_sg_it);
        sg_miter_stop(&cpg->p.dst_sg_it);

        if (req_ctx->op != COP_AES_CBC)
                return;

        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
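/*
 * Program one hash chunk. The FRAG bits tell the engine whether this is a
 * complete hash (NOT_FRAG), the first, a middle or the last fragment; for
 * anything but the first fragment the previously saved digest state is
 * restored through the DIGEST_INITIAL_VAL registers.
 */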
static void mv_process_hash_current(int first_block)
{
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct req_progress *p = &cpg->p;
        struct sec_accel_config op = { 0 };
        int is_last;

        switch (req_ctx->op) {
        case COP_SHA1:
        default:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
                break;
        case COP_HMAC_SHA1:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
                memcpy(cpg->sram + SRAM_HMAC_IV_IN,
                       tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
                break;
        }

        op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
                MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

        setup_data_in();

        op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
                MAC_FRAG_LEN(p->crypt_len);
        op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
                MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

        is_last = req_ctx->last_chunk
                && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
                && (req_ctx->count <= MAX_HW_HASH_SIZE);
        if (req_ctx->first_hash) {
                if (is_last)
                        op.config |= CFG_NOT_FRAG;
                else
                        op.config |= CFG_FIRST_FRAG;

                req_ctx->first_hash = 0;
        } else {
                if (is_last)
                        op.config |= CFG_LAST_FRAG;
                else
                        op.config |= CFG_MID_FRAG;

                writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
                writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
                writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
                writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
                writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
        }

        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

        /*
         * XXX: add timer if the interrupt does not occur for some mystery
         * reason
         */
}
static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
                                          struct shash_desc *desc)
{
        int i;
        struct sha1_state shash_state;

        shash_state.count = ctx->count + ctx->count_add;
        for (i = 0; i < 5; i++)
                shash_state.state[i] = ctx->state[i];
        memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
        return crypto_shash_import(desc, &shash_state);
}
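/*
 * Finish a hash in software. Used when the total length exceeds
 * MAX_HW_HASH_SIZE or when only a sub-block tail is left; the hardware
 * state is imported into the fallback shash first, unless this is still
 * the very first (purely buffered) chunk.
 */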
static int mv_hash_final_fallback(struct ahash_request *req)
{
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
        } desc;
        int rc;

        desc.shash.tfm = tfm_ctx->fallback;
        desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        if (unlikely(req_ctx->first_hash)) {
                crypto_shash_init(&desc.shash);
                crypto_shash_update(&desc.shash, req_ctx->buffer,
                                    req_ctx->extra_bytes);
        } else {
                /* only SHA1 for now.... */
                rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
                if (rc)
                        goto out;
        }
        rc = crypto_shash_final(&desc.shash, req->result);
out:
        return rc;
}
static void mv_hash_algo_completion(void)
{
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

        if (ctx->extra_bytes)
                copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
        sg_miter_stop(&cpg->p.src_sg_it);

        if (likely(ctx->last_chunk)) {
                if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
                        memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
                               crypto_ahash_digestsize(crypto_ahash_reqtfm
                                                       (req)));
                } else
                        mv_hash_final_fallback(req);
        } else {
                ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
                ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
                ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
                ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
                ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
        }
}
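/*
 * Called from the queue thread once the interrupt reported completion of
 * a chunk: copy ciphertext back to the destination scatterlist (crypt
 * requests only), then either program the next chunk or complete the
 * request and return the engine to idle.
 */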
static void dequeue_complete_req(void)
{
        struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;

        cpg->p.hw_processed_bytes += cpg->p.crypt_len;
        if (cpg->p.copy_back) {
                int need_copy_len = cpg->p.crypt_len;
                int sram_offset = 0;
                do {
                        int dst_copy;

                        if (!cpg->p.sg_dst_left) {
                                ret = sg_miter_next(&cpg->p.dst_sg_it);
                                BUG_ON(!ret);
                                cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
                                cpg->p.dst_start = 0;
                        }

                        buf = cpg->p.dst_sg_it.addr;
                        buf += cpg->p.dst_start;

                        dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

                        memcpy(buf,
                               cpg->sram + SRAM_DATA_OUT_START + sram_offset,
                               dst_copy);
                        sram_offset += dst_copy;
                        cpg->p.sg_dst_left -= dst_copy;
                        need_copy_len -= dst_copy;
                        cpg->p.dst_start += dst_copy;
                } while (need_copy_len > 0);
        }

        cpg->p.crypt_len = 0;

        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
        if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
                cpg->p.process(0);
        } else {
                cpg->p.complete();
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->complete(req, 0);
                local_bh_enable();
        }
}
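/*
 * Number of scatterlist entries needed to cover total_bytes; note the
 * entries are indexed as a flat array rather than walked with sg_next().
 */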
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
        int i = 0;
        size_t cur_len;

        while (sl) {
                cur_len = sl[i].length;
                ++i;
                if (total_bytes > cur_len)
                        total_bytes -= cur_len;
                else
                        break;
        }

        return i;
}
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
        struct req_progress *p = &cpg->p;
        int num_sgs;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        p->hw_nbytes = req->nbytes;
        p->complete = mv_crypto_algo_completion;
        p->process = mv_process_current_q;
        p->copy_back = 1;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        num_sgs = count_sgs(req->dst, req->nbytes);
        sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

        mv_process_current_q(1);
}
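/*
 * Kick off a hash request. The hardware works on whole SHA1 blocks, so a
 * sub-block tail is carried in ctx->buffer as extra_bytes until the next
 * update (or handed to the fallback on final). A request that leaves no
 * hardware work completes here directly.
 */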
static void mv_start_new_hash_req(struct ahash_request *req)
{
        struct req_progress *p = &cpg->p;
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
        int num_sgs, hw_bytes, old_extra_bytes, rc;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        hw_bytes = req->nbytes + ctx->extra_bytes;
        old_extra_bytes = ctx->extra_bytes;

        ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
        if (ctx->extra_bytes != 0
            && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
                hw_bytes -= ctx->extra_bytes;
        else
                ctx->extra_bytes = 0;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        if (hw_bytes) {
                p->hw_nbytes = hw_bytes;
                p->complete = mv_hash_algo_completion;
                p->process = mv_process_hash_current;

                if (unlikely(old_extra_bytes)) {
                        memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
                               old_extra_bytes);
                        p->crypt_len = old_extra_bytes;
                }

                mv_process_hash_current(1);
        } else {
                copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
                                ctx->extra_bytes - old_extra_bytes);
                sg_miter_stop(&p->src_sg_it);
                if (ctx->last_chunk)
                        rc = mv_hash_final_fallback(req);
                else
                        rc = 0;
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->base.complete(&req->base, rc);
                local_bh_enable();
        }
}
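/*
 * The queue thread: sole consumer of the crypto queue and the only
 * context that programs the engine, implementing the state machine
 * sketched at the top of this file. It is woken by mv_handle_req() when
 * work is queued and by crypto_int() when the engine is done.
 */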
static int queue_manag(void *data)
{
        cpg->eng_st = ENGINE_IDLE;
        do {
                struct crypto_async_request *async_req = NULL;
                struct crypto_async_request *backlog = NULL;

                __set_current_state(TASK_INTERRUPTIBLE);

                if (cpg->eng_st == ENGINE_W_DEQUEUE)
                        dequeue_complete_req();

                spin_lock_irq(&cpg->lock);
                if (cpg->eng_st == ENGINE_IDLE) {
                        backlog = crypto_get_backlog(&cpg->queue);
                        async_req = crypto_dequeue_request(&cpg->queue);
                        if (async_req) {
                                BUG_ON(cpg->eng_st != ENGINE_IDLE);
                                cpg->eng_st = ENGINE_BUSY;
                        }
                }
                spin_unlock_irq(&cpg->lock);

                if (backlog) {
                        backlog->complete(backlog, -EINPROGRESS);
                        backlog = NULL;
                }

                if (async_req) {
                        if (async_req->tfm->__crt_alg->cra_type !=
                            &crypto_ahash_type) {
                                struct ablkcipher_request *req =
                                    ablkcipher_request_cast(async_req);
                                mv_start_new_crypt_req(req);
                        } else {
                                struct ahash_request *req =
                                    ahash_request_cast(async_req);
                                mv_start_new_hash_req(req);
                        }
                        async_req = NULL;
                }

                schedule();

        } while (!kthread_should_stop());
        return 0;
}
static int mv_handle_req(struct crypto_async_request *req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cpg->lock, flags);
        ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);
        return ret;
}
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
        return 0;
}
static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
                                 int is_last, unsigned int req_len,
                                 int count_add)
{
        memset(ctx, 0, sizeof(*ctx));
        ctx->op = op;
        ctx->count = req_len;
        ctx->first_hash = 1;
        ctx->last_chunk = is_last;
        ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
                                   unsigned req_len)
{
        ctx->last_chunk = is_last;
        ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

        mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
                             tfm_ctx->count_add);
        return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
        if (!req->nbytes)
                return 0;

        mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
        return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

        mv_update_hash_req_ctx(ctx, 1, 0);
        return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
        mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
        return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

        mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
                             req->nbytes, tfm_ctx->count_add);
        return mv_handle_req(&req->base);
}
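/*
 * Cache the HMAC inner/outer pad states (as produced by the software base
 * hash) in big-endian form, ready to be copied into SRAM as the engine's
 * inner/outer IVs.
 */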
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
                             const void *ostate)
{
        const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
        int i;

        for (i = 0; i < 5; i++) {
                ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
                ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
        }
}
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        int rc;
        struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
        int bs, ds, ss;

        if (!ctx->base_hash)
                return 0;

        rc = crypto_shash_setkey(ctx->fallback, key, keylen);
        if (rc)
                return rc;

        /* Can't see a way to extract the ipad/opad from the fallback tfm
           so I'm basically copying code from the hmac module */
        bs = crypto_shash_blocksize(ctx->base_hash);
        ds = crypto_shash_digestsize(ctx->base_hash);
        ss = crypto_shash_statesize(ctx->base_hash);

        {
                struct {
                        struct shash_desc shash;
                        char ctx[crypto_shash_descsize(ctx->base_hash)];
                } desc;
                unsigned int i;
                char ipad[ss];
                char opad[ss];

                desc.shash.tfm = ctx->base_hash;
                desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
                    CRYPTO_TFM_REQ_MAY_SLEEP;

                if (keylen > bs) {
                        int err;

                        err = crypto_shash_digest(&desc.shash, key, keylen,
                                                  ipad);
                        if (err)
                                return err;

                        keylen = ds;
                } else
                        memcpy(ipad, key, keylen);

                memset(ipad + keylen, 0, bs - keylen);
                memcpy(opad, ipad, bs);

                for (i = 0; i < bs; i++) {
                        ipad[i] ^= 0x36;
                        opad[i] ^= 0x5c;
                }

                rc = crypto_shash_init(&desc.shash) ? :
                    crypto_shash_update(&desc.shash, ipad, bs) ? :
                    crypto_shash_export(&desc.shash, ipad) ? :
                    crypto_shash_init(&desc.shash) ? :
                    crypto_shash_update(&desc.shash, opad, bs) ? :
                    crypto_shash_export(&desc.shash, opad);

                if (rc == 0)
                        mv_hash_init_ivs(ctx, ipad, opad);

                return rc;
        }
}
static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
                            enum hash_op op, int count_add)
{
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *fallback_tfm = NULL;
        struct crypto_shash *base_hash = NULL;
        int err = -ENOMEM;

        ctx->op = op;
        ctx->count_add = count_add;

        /* Allocate a fallback and abort if it failed. */
        fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING MV_CESA
                       "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }
        ctx->fallback = fallback_tfm;

        if (base_hash_name) {
                /* Allocate a hash to compute the ipad/opad of hmac. */
                base_hash = crypto_alloc_shash(base_hash_name, 0,
                                               CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(base_hash)) {
                        printk(KERN_WARNING MV_CESA
                               "Base driver '%s' could not be loaded!\n",
                               base_hash_name);
                        err = PTR_ERR(base_hash);
                        goto err_bad_base;
                }
        }
        ctx->base_hash = base_hash;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_req_hash_ctx) +
                                 crypto_shash_descsize(ctx->fallback));
        return 0;
err_bad_base:
        crypto_free_shash(fallback_tfm);
out:
        return err;
}
static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
        struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->fallback);
        if (ctx->base_hash)
                crypto_free_shash(ctx->base_hash);
}
static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
        return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
        return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
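/*
 * Interrupt handler: acknowledge ACCEL0_DONE and defer the actual
 * post-processing to the queue thread by moving to ENGINE_W_DEQUEUE.
 */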
static irqreturn_t crypto_int(int irq, void *priv)
{
        u32 val;

        val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
        if (!(val & SEC_INT_ACCEL0_DONE))
                return IRQ_NONE;

        val &= ~SEC_INT_ACCEL0_DONE;
        writel(val, cpg->reg + FPGA_INT_STATUS);
        writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
        BUG_ON(cpg->eng_st != ENGINE_BUSY);
        cpg->eng_st = ENGINE_W_DEQUEUE;
        wake_up_process(cpg->queue_th);
        return IRQ_HANDLED;
}
static struct crypto_alg mv_aes_alg_ecb = {
        .cra_name		= "ecb(aes)",
        .cra_driver_name	= "mv-ecb-aes",
        .cra_priority	= 300,
        .cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize	= AES_BLOCK_SIZE,
        .cra_ctxsize	= sizeof(struct mv_ctx),
        .cra_alignmask	= 0,
        .cra_type	= &crypto_ablkcipher_type,
        .cra_module	= THIS_MODULE,
        .cra_init	= mv_cra_init,
        .cra_u		= {
                .ablkcipher = {
                        .min_keysize	= AES_MIN_KEY_SIZE,
                        .max_keysize	= AES_MAX_KEY_SIZE,
                        .setkey		= mv_setkey_aes,
                        .encrypt	= mv_enc_aes_ecb,
                        .decrypt	= mv_dec_aes_ecb,
                },
        },
};
static struct crypto_alg mv_aes_alg_cbc = {
        .cra_name		= "cbc(aes)",
        .cra_driver_name	= "mv-cbc-aes",
        .cra_priority	= 300,
        .cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize	= AES_BLOCK_SIZE,
        .cra_ctxsize	= sizeof(struct mv_ctx),
        .cra_alignmask	= 0,
        .cra_type	= &crypto_ablkcipher_type,
        .cra_module	= THIS_MODULE,
        .cra_init	= mv_cra_init,
        .cra_u		= {
                .ablkcipher = {
                        .ivsize		= AES_BLOCK_SIZE,
                        .min_keysize	= AES_MIN_KEY_SIZE,
                        .max_keysize	= AES_MAX_KEY_SIZE,
                        .setkey		= mv_setkey_aes,
                        .encrypt	= mv_enc_aes_cbc,
                        .decrypt	= mv_dec_aes_cbc,
                },
        },
};
static struct ahash_alg mv_sha1_alg = {
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
        .finup = mv_hash_finup,
        .digest = mv_hash_digest,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags =
                            CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
                        .cra_init = mv_cra_hash_sha1_init,
                        .cra_exit = mv_cra_hash_exit,
                        .cra_module = THIS_MODULE,
                }
        }
};
static struct ahash_alg mv_hmac_sha1_alg = {
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
        .finup = mv_hash_finup,
        .digest = mv_hash_digest,
        .setkey = mv_hash_setkey,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags =
                            CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
                        .cra_init = mv_cra_hash_hmac_sha1_init,
                        .cra_exit = mv_cra_hash_exit,
                        .cra_module = THIS_MODULE,
                }
        }
};
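/*
 * The platform device is expected to provide two MEM resources, "regs"
 * for the engine registers and "sram" for the security accelerator SRAM,
 * plus one interrupt.
 */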
static int mv_probe(struct platform_device *pdev)
{
        struct crypto_priv *cp;
        struct resource *res;
        int irq;
        int ret;

        if (cpg) {
                printk(KERN_ERR MV_CESA "Second crypto dev?\n");
                return -EEXIST;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!res)
                return -ENXIO;

        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        spin_lock_init(&cp->lock);
        crypto_init_queue(&cp->queue, 50);
        cp->reg = ioremap(res->start, resource_size(res));
        if (!cp->reg) {
                ret = -ENOMEM;
                goto err;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
        if (!res) {
                ret = -ENXIO;
                goto err_unmap_reg;
        }
        cp->sram_size = resource_size(res);
        cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
        cp->sram = ioremap(res->start, cp->sram_size);
        if (!cp->sram) {
                ret = -ENOMEM;
                goto err_unmap_reg;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0 || irq == NO_IRQ) {
                ret = irq;
                goto err_unmap_sram;
        }
        cp->irq = irq;

        platform_set_drvdata(pdev, cp);
        cpg = cp;

        cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
        if (IS_ERR(cp->queue_th)) {
                ret = PTR_ERR(cp->queue_th);
                goto err_unmap_sram;
        }

        ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
                          cp);
        if (ret)
                goto err_thread;

        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
        writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

        ret = crypto_register_alg(&mv_aes_alg_ecb);
        if (ret) {
                printk(KERN_WARNING MV_CESA
                       "Could not register aes-ecb driver\n");
                goto err_irq;
        }

        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret) {
                printk(KERN_WARNING MV_CESA
                       "Could not register aes-cbc driver\n");
                goto err_unreg_ecb;
        }

        ret = crypto_register_ahash(&mv_sha1_alg);
        if (ret == 0)
                cpg->has_sha1 = 1;
        else
                printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

        ret = crypto_register_ahash(&mv_hmac_sha1_alg);
        if (ret == 0) {
                cpg->has_hmac_sha1 = 1;
        } else {
                printk(KERN_WARNING MV_CESA
                       "Could not register hmac-sha1 driver\n");
        }

        return 0;
err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
        free_irq(irq, cp);
err_thread:
        kthread_stop(cp->queue_th);
err_unmap_sram:
        iounmap(cp->sram);
err_unmap_reg:
        iounmap(cp->reg);
err:
        kfree(cp);
        cpg = NULL;
        platform_set_drvdata(pdev, NULL);
        return ret;
}
static int mv_remove(struct platform_device *pdev)
{
        struct crypto_priv *cp = platform_get_drvdata(pdev);

        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
        if (cp->has_sha1)
                crypto_unregister_ahash(&mv_sha1_alg);
        if (cp->has_hmac_sha1)
                crypto_unregister_ahash(&mv_hmac_sha1_alg);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        memset(cp->sram, 0, cp->sram_size);
        iounmap(cp->sram);
        iounmap(cp->reg);
        kfree(cp);
        cpg = NULL;
        return 0;
}
static struct platform_driver marvell_crypto = {
        .probe		= mv_probe,
        .remove		= mv_remove,
        .driver		= {
                .owner	= THIS_MODULE,
                .name	= "mv_crypto",
        },
};
MODULE_ALIAS("platform:mv_crypto");
static int __init mv_crypto_init(void)
{
        return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
        platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");