/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

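/**
 * crypto_hash_walk_done - finish the current step of a hash walk
 * @walk: walk state set up by crypto_hash_walk_first()
 * @err: result of hashing the chunk that was just processed
 *
 * Unmaps the current page, propagates @err if it is non-zero, and
 * otherwise advances to the next chunk of source data.  Returns the
 * number of bytes available in the next chunk, 0 once the walk is
 * complete, or a negative error code.
 */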
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

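/**
 * crypto_hash_walk_first - start walking an ahash request's source data
 * @req: ahash request whose ->src scatterlist is to be walked
 * @walk: walk state to initialize
 *
 * Returns the number of bytes mapped in for the first chunk, or 0 if
 * the request carries no data.
 */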
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

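/**
 * crypto_ahash_setkey - set the key of a keyed hash transform
 * @tfm: hash transform to key
 * @key: key material; an unaligned key is bounced through a temporary
 *	 buffer aligned to the algorithm's alignmask
 * @keylen: length of @key in bytes
 *
 * Returns 0 on success or the error returned by the algorithm's
 * setkey operation (-ENOSYS if the algorithm takes no key).
 */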
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result		= ORIGINAL(result)
	 *		.complete	= ORIGINAL(base.complete)
	 *		.data		= ORIGINAL(base.data)
	 *	}
	 * }
	 */
	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *	    is for internal use of the Crypto API and the
	 *	    user must _NOT_ _EVER_ depend on its content!
	 */
	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

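/*
 * The exported final/finup/digest entry points below all funnel
 * through crypto_ahash_op(), so a misaligned result buffer is
 * transparently bounced via ahash_op_unaligned().
 */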
int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

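/**
 * crypto_alloc_ahash - allocate an asynchronous hash transform
 * @alg_name: name of the algorithm, e.g. "sha256"
 * @type: algorithm type flags
 * @mask: mask restricting the algorithm lookup
 *
 * Returns a transform handle, or an ERR_PTR() on failure; release
 * the handle with crypto_free_ahash().
 */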
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

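/*
 * Illustrative caller flow (a minimal sketch, not part of this file;
 * "sha256", my_done, my_ctx, data and len are placeholders and error
 * handling is abbreviated):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[32];
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);
 *
 * An asynchronous implementation may return -EINPROGRESS here, in
 * which case the caller must wait for my_done() to run before calling
 * ahash_request_free() and crypto_free_ahash().
 */
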
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

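/**
 * crypto_register_ahash - register an asynchronous hash algorithm
 * @alg: algorithm definition; ahash_prepare_alg() validates its
 *	 digestsize and statesize before registration
 *
 * Returns 0 on success or a negative error code.
 */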
int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");