/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"
enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr);
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}
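/*
 * A worked example (assuming 4 KiB pages): with start == 0xff8 and
 * len == 16, the last byte would land at 0x1007, so end_page == 0x1000
 * and the spot moves up to 0x1000, entirely within the second page.
 * When start + len - 1 stays on start's page, end_page lies at or before
 * start and start itself is returned unchanged.
 */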
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
                                       unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}
static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
                                       unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
                blkcipher_unmap_src(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
                blkcipher_done_fast(walk, n);
        } else {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                blkcipher_done_slow(walk, n);
        }

        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;
        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < walk->cipher_blocksize)) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        bsize = min(walk->walk_blocksize, n);

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
            !scatterwalk_aligned(&walk->out, walk->alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }

        return err;
}
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
        unsigned bs = walk->walk_blocksize;
        unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
        unsigned int size = aligned_bs * 2 +
                            walk->ivsize + max(aligned_bs, walk->ivsize) -
                            (walk->alignmask + 1);
        u8 *iv;

        size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, walk->ivsize);

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}
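/*
 * A note on the sizing above (informal, not from the original source):
 * the buffer must hold two page-safe regions of bs bytes, which the slow
 * path in blkcipher_next_slow() later reuses as its bounce dst/src,
 * followed by a page-safe copy of the IV.  Each blkcipher_get_spot() call
 * may skip forward to the next page boundary, which is why the size
 * computation reserves max(aligned_bs, walk->ivsize) extra bytes.
 */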
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
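/*
 * Typical use of the walker by a cipher implementation (an illustrative
 * sketch only; example_encrypt() and example_do_blocks() are hypothetical
 * and not part of this file).  The walker hands back page-safe chunks of
 * at least one block; the caller processes walk.nbytes bytes and passes
 * the number of bytes it did NOT process to blkcipher_walk_done(), which
 * either advances to the next chunk or finishes the walk:
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while ((nbytes = walk.nbytes) != 0) {
 *			nbytes = example_do_blocks(desc, &walk, nbytes);
 *			err = blkcipher_walk_done(desc, &walk, nbytes);
 *		}
 *
 *		return err;
 *	}
 */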
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = desc->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = blkcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = blocksize;
        walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk,
                                   struct crypto_aead *tfm,
                                   unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = blocksize;
        walk->cipher_blocksize = crypto_aead_blocksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}
static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}
static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
            cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}
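/*
 * Both sets of ops drive the same algorithm.  Illustratively (the exact
 * type/mask resolution lives in the crypto API core, not here): a caller
 * using crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC) ends up
 * with the synchronous crt_blkcipher ops installed above, while
 * crypto_alloc_ablkcipher("cbc(aes)", 0, 0) reaches the same blkcipher
 * through the async_setkey/async_encrypt/async_decrypt wrappers.
 */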
#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));
        rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
        seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
                                             "<default>");
}
= {
554 .ctxsize
= crypto_blkcipher_ctxsize
,
555 .init
= crypto_init_blkcipher_ops
,
556 #ifdef CONFIG_PROC_FS
557 .show
= crypto_blkcipher_show
,
559 .report
= crypto_blkcipher_report
,
561 EXPORT_SYMBOL_GPL(crypto_blkcipher_type
);
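/*
 * A driver exposes a synchronous blkcipher by pointing cra_type at the
 * structure above.  A hypothetical registration, for illustration only
 * (struct example_ctx and the example_* callbacks are not real):
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "example",
 *		.cra_driver_name	= "example-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_ctx),
 *		.cra_type		= &crypto_blkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= { .blkcipher = {
 *			.min_keysize	= 16,
 *			.max_keysize	= 32,
 *			.ivsize		= 16,
 *			.setkey		= example_setkey,
 *			.encrypt	= example_encrypt,
 *			.decrypt	= example_decrypt,
 *		} },
 *	};
 *
 * crypto_register_alg(&example_alg) then routes tfm initialisation
 * through crypto_init_blkcipher_ops() above.
 */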
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
                                 const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
                                             struct rtattr **tb, u32 type,
                                             u32 mask)
{
        struct {
                int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen);
                int (*encrypt)(struct ablkcipher_request *req);
                int (*decrypt)(struct ablkcipher_request *req);

                unsigned int min_keysize;
                unsigned int max_keysize;
                unsigned int ivsize;

                const char *geniv;
        } balg;
        const char *name;
        struct crypto_skcipher_spawn *spawn;
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
            algt->mask)
                return ERR_PTR(-EINVAL);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return ERR_CAST(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = crypto_instance_ctx(inst);

        /* Ignore async algorithms if necessary. */
        mask |= crypto_requires_sync(algt->type, algt->mask);

        crypto_set_skcipher_spawn(spawn, inst);
        err = crypto_grab_nivcipher(spawn, name, type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER) {
                balg.ivsize = alg->cra_blkcipher.ivsize;
                balg.min_keysize = alg->cra_blkcipher.min_keysize;
                balg.max_keysize = alg->cra_blkcipher.max_keysize;

                balg.setkey = async_setkey;
                balg.encrypt = async_encrypt;
                balg.decrypt = async_decrypt;

                balg.geniv = alg->cra_blkcipher.geniv;
        } else {
                balg.ivsize = alg->cra_ablkcipher.ivsize;
                balg.min_keysize = alg->cra_ablkcipher.min_keysize;
                balg.max_keysize = alg->cra_ablkcipher.max_keysize;

                balg.setkey = alg->cra_ablkcipher.setkey;
                balg.encrypt = alg->cra_ablkcipher.encrypt;
                balg.decrypt = alg->cra_ablkcipher.decrypt;

                balg.geniv = alg->cra_ablkcipher.geniv;
        }

        err = -EINVAL;
        if (!balg.ivsize)
                goto err_drop_alg;

        /*
         * This is only true if we're constructing an algorithm with its
         * default IV generator.  For the default generator we elide the
         * template name and double-check the IV generator.
         */
        if (algt->mask & CRYPTO_ALG_GENIV) {
                if (!balg.geniv)
                        balg.geniv = crypto_default_geniv(alg);
                err = -EAGAIN;
                if (strcmp(tmpl->name, balg.geniv))
                        goto err_drop_alg;

                memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
                memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
                       CRYPTO_MAX_ALG_NAME);
        } else {
                err = -ENAMETOOLONG;
                if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
                if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_driver_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
        }

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
        inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_givcipher_type;

        inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
        inst->alg.cra_ablkcipher.geniv = balg.geniv;

        inst->alg.cra_ablkcipher.setkey = balg.setkey;
        inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
        inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
        return inst;

err_drop_alg:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
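/*
 * These helpers back the IV-generator templates (e.g. "chainiv" and
 * "eseqiv").  A template's alloc callback built on them might look like
 * this sketch (example_geniv_alloc(), example_givencrypt() and
 * struct example_ctx are hypothetical):
 *
 *	static struct crypto_instance *example_geniv_alloc(
 *		struct crypto_template *tmpl, struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *
 *		inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
 *		if (IS_ERR(inst))
 *			return inst;
 *
 *		inst->alg.cra_init = skcipher_geniv_init;
 *		inst->alg.cra_exit = skcipher_geniv_exit;
 *		inst->alg.cra_ctxsize = sizeof(struct example_ctx);
 *		inst->alg.cra_ablkcipher.givencrypt = example_givencrypt;
 *
 *		return inst;
 *	}
 */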
void skcipher_geniv_free(struct crypto_instance *inst)
{
        crypto_drop_skcipher(crypto_instance_ctx(inst));
        kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);
int skcipher_geniv_init(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_ablkcipher *cipher;

        cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        tfm->crt_ablkcipher.base = cipher;
        tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

        return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);
void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
        crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");