// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"
static const struct crypto_type crypto_ahash_type;
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));

		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}
static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
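
/*
 * Worked example (illustrative numbers, not from this file): with an
 * algorithm alignmask of 63 and crypto_tfm_ctx_alignment() of 8, the
 * slack reserved is 63 & ~7 = 56 bytes -- enough to realign a
 * minimum-aligned allocation up to the next 64-byte boundary.
 */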
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result		= ORIGINAL(result)
	 *		.complete	= ORIGINAL(base.complete)
	 *		.data		= ORIGINAL(base.data)
	 *	}
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *	    is for internal use of the Crypto API and the
	 *	    user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}
static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}
static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}
int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}
static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	return 0;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}
static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}
#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}
static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}
int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");