/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
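
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * consumes the walk API as a loop, hashing one mapped chunk per iteration;
 * process_chunk() is a hypothetical driver-side function.
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		process_chunk(walk.data, nbytes);
 *
 * Each iteration sees at most one page of data, already offset and
 * alignmask-adjusted by hash_walk_next() above.
 */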

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
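
/*
 * Worked example of the bounce-buffer arithmetic above (numbers are
 * illustrative only): with alignmask = 7 and a key at address 0x1003,
 * ahash_setkey_unaligned() allocates keylen + 7 bytes, rounds the buffer
 * start up to the next 8-byte boundary with ALIGN(), copies the key there,
 * and hands the driver an aligned copy. kzfree() then wipes the copy so
 * no key material is left behind.
 */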

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
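
/*
 * Illustrative caller sketch (not part of the original file): computing a
 * digest over a scatterlist with the ahash API. Error handling and the
 * completion wait are elided; "sha1" is only an example algorithm name,
 * and my_done()/my_ctx are hypothetical caller-side names.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * A return value of -EINPROGRESS or -EBUSY means completion will be
 * reported asynchronously through my_done().
 */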

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}
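
/*
 * Note (added for clarity): when the underlying algorithm is a synchronous
 * hash (shash) rather than a native ahash, crypto_init_shash_ops_async()
 * above wires the ahash entry points to shash wrappers, and the transform
 * context then holds nothing but a struct crypto_shash pointer; that is
 * why crypto_ahash_extsize() below special-cases non-ahash algorithms.
 */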

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
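
/*
 * Illustrative sketch (not part of the original file): a driver registers
 * an asynchronous hash like this. All my_-prefixed names are hypothetical
 * driver-side callbacks and types.
 *
 *	static struct ahash_alg my_sha1_alg = {
 *		.init	= my_sha1_init,
 *		.update	= my_sha1_update,
 *		.final	= my_sha1_final,
 *		.digest	= my_sha1_digest,
 *		.halg	= {
 *			.digestsize = SHA1_DIGEST_SIZE,
 *			.base	= {
 *				.cra_name	 = "sha1",
 *				.cra_driver_name = "sha1-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
 *						   CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA1_BLOCK_SIZE,
 *				.cra_ctxsize	 = sizeof(struct my_ctx),
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha1_alg);
 *
 * .finup may be left NULL; crypto_ahash_init_tfm() then falls back to
 * ahash_def_finup() above.
 */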

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");