/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

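/*
 * Internal state flags for the skcipher walk below.  PHYS means the walk
 * exposes physical pages (async users), SLOW means the current chunk is
 * being bounced through a temporary block, COPY means a whole bounce page
 * is used to fix up misalignment, DIFF means source and destination are
 * mapped at different virtual addresses, and SLEEP means the walk may use
 * GFP_KERNEL allocations and yield the CPU between steps.
 */
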
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

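/*
 * Flush the block that was processed in the temporary (slow-path) buffer
 * back out to the destination scatterlist.
 */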
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

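/*
 * Finish the current walk step.  A non-negative err is the number of bytes
 * the caller left unprocessed; a negative err aborts the walk.  Copy back
 * any bounce buffers, advance the scatterlists, and either start the next
 * step or release the walk's temporary resources.
 */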
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

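/*
 * For an async (physical) walk, flush all queued bounce buffers to their
 * destination scatterlists and free them, then release the walk's own
 * temporary buffers.
 */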
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

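/*
 * Slow path: fewer than a full block of contiguous bytes is available in
 * the source or destination scatterlist, so bounce the block through an
 * aligned temporary buffer that does not straddle a page.
 */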
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

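/*
 * Copy path: the data is contiguous but misaligned, so stage it through a
 * whole bounce page.  For async (physical) walks the write-back is queued
 * and performed later by skcipher_walk_complete().
 */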
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

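/*
 * Fast path: source and destination are properly aligned, so map them in
 * place; if they resolve to different addresses, mark the walk DIFF so that
 * both mappings are torn down in skcipher_walk_done().
 */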
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

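/*
 * The caller's IV is not sufficiently aligned for this transform: copy it
 * into an aligned spot inside a freshly allocated walk buffer and point
 * walk->iv at the copy.
 */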
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

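/*
 * A typical synchronous cipher implementation drives the walk roughly as
 * follows.  This is only a sketch: my_encrypt(), my_cipher_crypt_blocks()
 * and struct my_ctx are hypothetical names, not part of this file, and an
 * AES-sized block cipher is assumed:
 *
 *	static int my_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
 *		struct skcipher_walk walk;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *		while (walk.nbytes) {
 *			unsigned int n;
 *
 *			n = walk.nbytes - (walk.nbytes % AES_BLOCK_SIZE);
 *			my_cipher_crypt_blocks(ctx, walk.dst.virt.addr,
 *					       walk.src.virt.addr, n, walk.iv);
 *			err = skcipher_walk_done(&walk, walk.nbytes - n);
 *		}
 *		return err;
 *	}
 */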
void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

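/*
 * What follows is the glue that lets algorithms still registered through
 * the legacy blkcipher and ablkcipher interfaces be used behind the
 * skcipher API: the tfm context holds a pointer to an instance of the
 * legacy cipher and the skcipher entry points forward to it.
 */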
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (tfm->keysize)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

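/*
 * A sketch of how a kernel consumer typically uses this API, with error
 * handling trimmed.  "cbc(aes)", key, data, len and iv are placeholders
 * chosen for illustration (len must be a multiple of the block size):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, len);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */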
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");