/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from the crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_ahash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);
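
/*
 * mcryptd_arm_flusher - make sure a delayed flush is pending for this CPU
 *
 * If the per-CPU algorithm state has no flusher engaged yet, add it to
 * this CPU's flush list and schedule the delayed flush work, so that jobs
 * sitting in partially filled multibuffer lanes are eventually completed.
 */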
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);
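
/* Set up a request queue and worker for every possible CPU. */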
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}
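
/*
 * Queue a request on the current CPU's queue and kick that CPU's worker.
 * The submitting CPU is recorded in the request context so the completion
 * can later be matched back to it.
 */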
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context, do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */
	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}
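
/*
 * Delayed-work handler armed by mcryptd_arm_flusher().  If the per-CPU
 * algorithm state still has its flusher engaged, remove it from the flush
 * list and invoke the algorithm's flusher to complete outstanding lanes.
 */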
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}
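
/*
 * Allocate a crypto instance with @head bytes of headroom in front of it
 * and @tail bytes behind it, deriving the "mcryptd(...)" driver name and
 * the basic algorithm properties from the wrapped algorithm @alg.
 */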
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}
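
/*
 * Fold the CRYPTO_ALG_INTERNAL bits from the template attributes into
 * *type and *mask.  Returns true only when the caller explicitly asked
 * for an internal algorithm; the caller refuses to instantiate otherwise.
 */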
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ahash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *hash;

	hash = crypto_spawn_ahash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_ahash_reqsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_ahash *child = ctx->child;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}
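
/*
 * Save the caller's completion callback in the request context and swap
 * in @complete, then hand the request to the daemon's queue.  The saved
 * callback is invoked once the real operation has run.
 */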
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_update(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_final(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	rctx->out = req->result;
	err = ahash_mcryptd_finup(&rctx->areq);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = ahash_mcryptd_digest(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_import(&rctx->areq, in);
}
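
/*
 * Instantiate an asynchronous "mcryptd(...)" wrapper around the ahash
 * selected by the template parameters and register it.
 */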
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct hash_alg_common *halg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	halg = ahash_attr_alg(tb[1], type, mask);
	if (IS_ERR(halg))
		return PTR_ERR(halg);

	alg = &halg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_ahash_spawn(&ctx->spawn, halg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = halg->digestsize;
	inst->alg.halg.statesize = halg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_ahash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_ahash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};
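
/*
 * Allocate an mcryptd-wrapped ahash for @alg_name.  The resulting tfm
 * must have been created by this module's template; anything else is
 * rejected so callers cannot be handed a lookalike instance.
 *
 * A minimal usage sketch; "__intel_sha1-mb" stands in for whatever
 * internal multibuffer driver name the caller actually wraps:
 *
 *	struct mcryptd_ahash *mh;
 *
 *	mh = mcryptd_alloc_ahash("__intel_sha1-mb", CRYPTO_ALG_INTERNAL,
 *				 CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	...
 *	mcryptd_free_ahash(mh);
 */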
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

int ahash_mcryptd_digest(struct ahash_request *desc)
{
	return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
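
/*
 * Module init: set up the per-CPU flush lists and the request queue,
 * then register the "mcryptd" template.
 */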
static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	/* bail out before touching the per-CPU pointers if allocation failed */
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");