// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,	/* report physical pages, don't map */
	BLKCIPHER_WALK_SLOW = 1 << 1,	/* block bounced through a buffer */
	BLKCIPHER_WALK_COPY = 1 << 2,	/* data copied to an aligned page */
	BLKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

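/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for
 * start == 0xffe and len == 8 the last byte would land at 0x1005, so
 * end_page == 0x1000 and the spot is pushed forward onto the next
 * page.  For start == 0xff0 and len == 8 the chunk already fits on
 * one page, end_page == 0x0, and max() returns start unchanged.
 */
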
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

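/*
 * Caller contract (sketch, not part of the original file): a cipher
 * implementation passes back through @err the number of bytes it could
 * not process in this step, typically the sub-block tail, e.g.:
 *
 *	while ((nbytes = walk.nbytes)) {
 *		// process nbytes - (nbytes & (bsize - 1)) bytes here
 *		err = blkcipher_walk_done(desc, &walk,
 *					  nbytes & (bsize - 1));
 *	}
 *
 * A negative @err aborts the walk and releases any bounce buffers.
 */
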
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

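/*
 * Sizing note (illustrative arithmetic, not from the original): with
 * bsize == 16 and alignmask == 15, aligned_bsize == 16, so the
 * fallback kmalloc() above asks for 3 * 16 - 16 = 32 bytes plus the
 * context-alignment slack: enough for an aligned destination spot and
 * an aligned source spot that each avoid straddling a page boundary.
 */
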
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

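/*
 * Note on the @diff test above: it is zero only when source and
 * destination start at the same offset in the same page, i.e. the
 * operation is truly in place and one mapping can serve both sides;
 * otherwise BLKCIPHER_WALK_DIFF tells blkcipher_done_fast() that the
 * destination was mapped separately and must be unmapped too.
 */
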
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

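/*
 * Buffer layout sketch (illustrative): the blkcipher_get_spot() calls
 * above carve the allocation into two aligned_bs-sized scratch blocks
 * followed by an ivsize-sized slot, each nudged forward so that none
 * of them straddles a page boundary:
 *
 *	| alignment pad | block 0 | block 1 | iv copy |
 *
 * walk->iv then points at the aligned copy rather than the caller's
 * misaligned buffer.
 */
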
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

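/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * typical synchronous cipher, here a made-up "foo" ECB mode with an
 * assumed FOO_BLOCK_SIZE, drives the walk like this:
 *
 *	static int foo_ecb_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while ((nbytes = walk.nbytes)) {
 *			u8 *wsrc = walk.src.virt.addr;
 *			u8 *wdst = walk.dst.virt.addr;
 *
 *			// encrypt the full blocks from wsrc into wdst ...
 *
 *			err = blkcipher_walk_done(desc, &walk,
 *					nbytes & (FOO_BLOCK_SIZE - 1));
 *		}
 *
 *		return err;
 *	}
 */
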
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

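/*
 * Example (illustrative): with an alignmask of 3 (4-byte alignment), a
 * key at an address ending in ...2 takes the setkey_unaligned() path,
 * which bounces it through a freshly aligned buffer and wipes that
 * copy before freeing it; a key at an address ending in ...4 is handed
 * to cipher->setkey() directly.
 */
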
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

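/*
 * Worked example (illustrative): for a synchronous tfm (the type mask
 * selects the blkcipher interface) with cra_ctxsize == 20,
 * cra_alignmask == 7 and ivsize == 16, the context is first rounded up
 * to 24 bytes and then grows to 40 so that the IV slot carved out by
 * crypto_init_blkcipher_ops_sync() fits behind it.
 */
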
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

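/*
 * Resulting layout (sketch): the IV lives directly behind the aligned
 * algorithm context,
 *
 *	| crypto_tfm | ctx (cra_ctxsize, aligned) | iv (ivsize) |
 *
 * which is the extra space crypto_blkcipher_ctxsize() reserved for
 * synchronous tfms with a nonzero ivsize.
 */
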
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

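/*
 * Registration sketch (hypothetical, for illustration only): a legacy
 * algorithm selects this type through its crypto_alg; the foo_* names
 * and FOO_* constants below are made up:
 *
 *	static struct crypto_alg foo_alg = {
 *		.cra_name		= "ecb(foo)",
 *		.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 *		.cra_blocksize		= FOO_BLOCK_SIZE,
 *		.cra_type		= &crypto_blkcipher_type,
 *		.cra_u.blkcipher	= {
 *			.min_keysize	= FOO_MIN_KEY_SIZE,
 *			.max_keysize	= FOO_MAX_KEY_SIZE,
 *			.setkey		= foo_setkey,
 *			.encrypt	= foo_ecb_encrypt,
 *			.decrypt	= foo_ecb_decrypt,
 *		},
 *	};
 *
 * followed by crypto_register_alg(&foo_alg).
 */
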
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");