// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

/* Backup of the caller's request state while an ADJUSTED request runs. */
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

/* Map the current page of the walk and clamp the chunk to the alignmask. */
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

/* Load the next scatterlist entry into the walk state. */
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));

		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

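/*
 * Example (illustrative sketch, not part of the original file): a driver
 * typically consumes the walk API above as a simple loop, hashing one
 * mapped chunk per iteration. "hash_one_chunk" and "ctx" are hypothetical
 * per-driver names.
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		hash_one_chunk(ctx, walk.data, nbytes);
 */
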
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

/* Bounce an unaligned key through a properly aligned temporary buffer. */
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

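/*
 * Example (illustrative sketch, not part of the original file): keyed
 * hashes such as HMAC must be keyed before use; until then
 * CRYPTO_TFM_NEED_KEY stays set and crypto_ahash_digest() fails with
 * -ENOKEY. "key"/"keylen" are caller-provided.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		err = crypto_ahash_setkey(tfm, key, keylen);
 */
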
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

/* Copy out the result and put the ORIGINAL request back together. */
static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

/* Dispatch op, bouncing through the unaligned path if the result buffer
 * does not satisfy the algorithm's alignmask. */
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

/* Default ->finup: chain ->update and ->final, tolerating async completion. */
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

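/*
 * Example (illustrative sketch, not part of the original file): one-shot
 * digest of a linear buffer through the ahash API, using crypto_wait_req()
 * to handle asynchronous completion. "data", "len" and "err" are
 * caller-provided; error handling is elided for brevity.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
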
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

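/*
 * Example (illustrative sketch, not part of the original file): a driver
 * registers an asynchronous hash by filling in struct ahash_alg and calling
 * crypto_register_ahash(). All "foo" names and the priority value are
 * placeholders; statesize must be non-zero or ahash_prepare_alg() rejects
 * the algorithm.
 *
 *	static struct ahash_alg foo_sha256_alg = {
 *		.init	= foo_sha256_init,
 *		.update	= foo_sha256_update,
 *		.final	= foo_sha256_final,
 *		.digest	= foo_sha256_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct foo_sha256_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-foo",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&foo_sha256_alg);
 */
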
int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");