// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};
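
/*
 * Write one queued bounce buffer back to its final position in the
 * destination scatterlist.
 */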
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
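
/*
 * Remember the current output position and queue the bounce buffer so that
 * __ablkcipher_walk_complete() can write it back when the request finishes.
 */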
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
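
/*
 * Worked example for ablkcipher_get_spot(), assuming PAGE_SIZE == 4096:
 * with start == 4090 and len == 16, the last byte would land at offset
 * 4105, so end_page is 4096 and the spot moves up to 4096, keeping all
 * 16 bytes on one page. If start + len - 1 stays on start's page,
 * end_page <= start and start is returned unchanged.
 */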

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}
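
/*
 * Fast path: the data was processed directly in the caller's pages, so
 * only the input and output walks need to be advanced.
 */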
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
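
/*
 * Slow path: allocate a bounce buffer and copy the next block into it.
 * The allocation holds the bookkeeping struct plus enough slack (three
 * aligned block sizes) for ablkcipher_get_spot() to carve out an aligned
 * block that does not straddle a page boundary.
 */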
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}
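
/*
 * The caller's IV may violate the algorithm's alignmask. Allocate a
 * scratch buffer and move the IV to an aligned spot that does not
 * straddle a page; ablkcipher_walk_done() copies the IV back to
 * req->info and frees the buffer.
 */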
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
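
/*
 * Fast path setup: both walk positions are aligned and span enough of
 * the current page, so expose the pages directly with no bounce buffer.
 */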
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}
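
/*
 * Advance to the next chunk: clamp the chunk to the current scatterlist
 * entries, then take the fast in-place path when both sides are aligned,
 * or fall back to the bounce-buffer slow path (translating the buffer to
 * page/offset form for this physical-address walk).
 */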
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
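
/*
 * Typical driver-side use of the walk API, as an illustrative sketch only
 * (hypothetical driver code, not part of this file); it assumes
 * ablkcipher_walk_init() from <crypto/algapi.h> and an encrypt handler:
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (!err && walk.nbytes) {
 *		// process walk.nbytes bytes from walk.src.page/offset
 *		// into walk.dst.page/offset, then report 0 bytes left
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */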

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
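
/*
 * Check the key length against the algorithm's limits, then hand off to
 * setkey_unaligned() if the key pointer violates the alignmask.
 */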
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
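
/*
 * Report the algorithm's parameters to userspace over the crypto_user
 * netlink interface when networking is available; otherwise a -ENOSYS
 * stub is used.
 */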
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);