/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

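/*
 * Descriptive note (added): each CPU gets its own request queue and work
 * item, so a request queued on a CPU is also processed on that CPU and no
 * cross-CPU locking is needed beyond disabling preemption.
 */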
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

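/*
 * Descriptive note (added): queue the request on the current CPU's queue
 * and kick that CPU's worker.  get_cpu()/put_cpu() pin us to one CPU so
 * the queue and its work item stay consistent.
 */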
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent
	 * being preempted by cryptd_enqueue_request(). */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

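/*
 * Descriptive note (added): setkey runs synchronously on the underlying
 * blkcipher.  Request flags are copied down to the child, and result
 * flags (e.g. weak-key indications) are copied back up to the async
 * parent.
 */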
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

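/*
 * Descriptive note (added): this runs in the worker and performs the
 * actual (de)cryption on the synchronous child.  The caller's completion
 * routine was stashed in the request context by
 * cryptd_blkcipher_enqueue() and is restored before it is invoked;
 * local_bh_disable() is taken because completion handlers normally run
 * in softirq context and may expect BHs to be off.
 */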
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

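/*
 * Descriptive note (added): defer the request to the per-CPU queue.  The
 * caller's completion routine is saved in the request context and
 * temporarily replaced with the cryptd routine that does the real work.
 */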
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

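/*
 * Descriptive note (added): allocate a crypto instance with "head" bytes
 * in front of it and "tail" bytes of private context behind it.  The
 * driver name becomes "cryptd(<driver>)" while cra_name keeps the
 * wrapped algorithm's own name, so the instance can satisfy lookups for
 * the plain name at a higher priority (+50).
 */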
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

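/*
 * Descriptive note (added): the hash side wraps a synchronous shash.
 * The shash descriptor lives at the end of the ahash request context,
 * so the request size must cover both the context and the descriptor.
 */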
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

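/*
 * Descriptive note (added): export/import operate directly on the shash
 * descriptor embedded in the request context and therefore complete
 * synchronously, without going through the queue.
 */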
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

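/* Descriptive note (added): a single global queue serves all cryptd
 * instances. */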
static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	}

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

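/*
 * Illustrative sketch (added, not part of the original file): a driver
 * wanting the async wrapper around a synchronous cipher might do roughly
 * the following; "cbc(aes)" is just an example algorithm name, and real
 * callers typically pass driver-specific names and type/mask values:
 *
 *	struct cryptd_ablkcipher *tfm;
 *
 *	tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	cryptd_free_ablkcipher(tfm);
 */
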
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

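/*
 * Illustrative sketch (added, not part of the original file):
 * asynchronous use of a synchronous hash, e.g. "sha1" (an example name),
 * might look like:
 *
 *	struct cryptd_ahash *hash;
 *
 *	hash = cryptd_alloc_ahash("sha1", 0, 0);
 *	if (IS_ERR(hash))
 *		return PTR_ERR(hash);
 *	...
 *	cryptd_free_ahash(hash);
 */
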
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");