// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};
static int skcipher_walk_next(struct skcipher_walk *walk);
static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}
static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}
static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}
static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}
static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
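/*
 * Worked example of the calculation above (illustrative, assuming
 * PAGE_SIZE == 4096): for start == ...0x1fe0 and len == 64 the last byte
 * would fall at ...0x201f, so end_page is ...0x2000 and max() moves the
 * spot to the start of the next page.  If start + len - 1 stays on the
 * same page as start, end_page rounds down to at or below start and
 * start is returned unchanged.
 */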
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err; /* excess bytes not processed */
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);
static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
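/*
 * Typical use of the virtual-address walk by a cipher implementation, as a
 * minimal sketch (not part of this file; "xyz_ecb_encrypt" and
 * "xyz_encrypt_blocks" are hypothetical driver helpers).  The second
 * argument to skcipher_walk_done() is the number of bytes left unprocessed
 * in this step:
 *
 *	static int xyz_ecb_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct skcipher_walk walk;
 *		unsigned int nbytes;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *
 *		while ((nbytes = walk.nbytes) != 0) {
 *			nbytes = round_down(nbytes, walk.blocksize);
 *			xyz_encrypt_blocks(tfm, walk.dst.virt.addr,
 *					   walk.src.virt.addr, nbytes);
 *			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 *		}
 *
 *		return err;
 *	}
 */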
void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}
int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
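/*
 * Caller-side sketch of the setkey/encrypt flow (illustrative only; the
 * key, iv, scatterlists and "my_complete" callback are placeholders, and
 * error handling is abbreviated).  An asynchronous implementation may
 * return -EINPROGRESS and report completion through the callback:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      my_complete, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */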
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}
#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
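/*
 * Because the request size of a sync skcipher is bounded by
 * MAX_SYNC_SKCIPHER_REQSIZE, the request can live on the stack.  A rough
 * sketch of the intended use (the key, iv and sg below are placeholders):
 *
 *	struct crypto_sync_skcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
 *
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 *
 *	crypto_free_sync_skcipher(tfm);
 */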
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}
int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
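/*
 * Registration is normally done from module init with a statically defined
 * algorithm.  A rough sketch (all names, sizes and xyz_* handlers below are
 * placeholders, not an algorithm defined in this file):
 *
 *	static struct skcipher_alg xyz_alg = {
 *		.base.cra_name		= "xyz(cipher)",
 *		.base.cra_driver_name	= "xyz-generic",
 *		.base.cra_priority	= 100,
 *		.base.cra_blocksize	= 16,
 *		.base.cra_ctxsize	= sizeof(struct xyz_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= 16,
 *		.max_keysize		= 32,
 *		.ivsize			= 16,
 *		.setkey			= xyz_setkey,
 *		.encrypt		= xyz_encrypt,
 *		.decrypt		= xyz_decrypt,
 *	};
 *
 *	err = crypto_register_skcipher(&xyz_alg);
 */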
void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}
static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}
static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
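/*
 * A mode-of-operation template's ->create() hook can build on this helper
 * roughly as follows (sketch modelled on the simple templates such as ecb
 * and cbc; crypto_xyz_create and the xyz encrypt/decrypt handlers are
 * placeholders):
 *
 *	static int crypto_xyz_create(struct crypto_template *tmpl,
 *				     struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = crypto_xyz_encrypt;
 *		inst->alg.decrypt = crypto_xyz_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */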
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");