// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
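
/*
 * Illustrative example of the arithmetic above (assuming 4 KiB pages): for
 * start == ...0xff0 and len == 0x20, start + len - 1 lies in the next page,
 * so end_page is ...0x1000 and the returned spot begins at that page
 * boundary.  If the range already fits inside the current page, end_page
 * points at or before start and start is returned unchanged.
 */
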
static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
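
/*
 * Note on the err convention used by callers of skcipher_walk_done(): a
 * negative value reports a hard error, while a value >= 0 is the number of
 * bytes of the current step that were left unprocessed, e.g. (sketch):
 *
 *	err = skcipher_walk_done(&walk, walk.nbytes - nbytes_processed);
 */
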
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
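
/*
 * Typical driver-side use of the virtual-address walker, roughly (an
 * illustrative sketch; cipher_process_chunk() is a made-up helper that
 * processes as many full blocks as it can and returns the number of bytes
 * it handled):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = cipher_process_chunk(ctx,
 *						      walk.dst.virt.addr,
 *						      walk.src.virt.addr,
 *						      walk.nbytes, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */
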
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
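
/*
 * For AEAD transforms the walk covers only the plaintext/ciphertext area:
 * the common helper above skips req->assoclen bytes of associated data in
 * both scatterlists, and the decrypt variant additionally subtracts the
 * authentication tag from walk->total.  An AEAD implementation would start
 * its walk with, e.g. (sketch):
 *
 *	err = skcipher_walk_aead_encrypt(&walk, req, false);
 */
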
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
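
/*
 * Typical caller-side sequence for keying a transform, roughly (an
 * illustrative sketch; "cbc(aes)" and the key buffer are examples only):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * Until a key has been accepted, CRYPTO_TFM_NEED_KEY stays set and the
 * encrypt/decrypt entry points below return -ENOKEY.
 */
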
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
	const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
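
/*
 * A sync tfm is meant to be paired with an on-stack request, roughly (an
 * illustrative sketch; sg and iv are assumed to be set up by the caller):
 *
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *	skcipher_request_set_sync_tfm(req, tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_zero(req);
 *
 * The request-size check above is what keeps such on-stack requests safe.
 */
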
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
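
/*
 * Drivers that provide several algorithms usually register them as an
 * array, roughly (an illustrative sketch; my_algs and the init/exit names
 * are made up):
 *
 *	static struct skcipher_alg my_algs[] = { ... };
 *
 *	static int __init my_module_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 */
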
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

*tfm
)
1006 struct skcipher_ctx_simple
*ctx
= crypto_skcipher_ctx(tfm
);
1008 crypto_free_cipher(ctx
->cipher
);
1011 static void skcipher_free_instance_simple(struct skcipher_instance
*inst
)
1013 crypto_drop_cipher(skcipher_instance_ctx(inst
));
/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
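
/*
 * A template's ->create() hook built on the helper above looks roughly like
 * this (an illustrative sketch modelled on a simple mode such as ecb;
 * my_mode_encrypt/my_mode_decrypt are made-up handlers):
 *
 *	static int my_mode_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = my_mode_encrypt;
 *		inst->alg.decrypt = my_mode_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */
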
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");