crypto/ablkcipher.c

/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

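/*
 * When the walk has to take the slow path, output data is first produced
 * in a bounce buffer.  Each ablkcipher_buffer remembers that buffer
 * (data, len) and the scatterwalk position (dst) it must be copied back
 * to; pending buffers are chained on walk->buffers and flushed by
 * __ablkcipher_walk_complete() once the cipher operation finishes.
 */
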
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

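/*
 * Slow-path completion: advance the output walk past the bsize bytes
 * that were processed via the bounce buffer, crossing scatterlist
 * entries as needed.  The data itself is copied back later, in
 * __ablkcipher_walk_complete().
 */
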
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

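/*
 * Finish the current step of the walk.  A non-negative err is taken to
 * mean the step succeeded with err bytes left unprocessed; a negative
 * err aborts the walk.  If data remains, the next step is set up via
 * ablkcipher_walk_next(); otherwise any bounced IV is copied back to
 * the request and the IV buffer is freed.
 */
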
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

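/*
 * Slow path: the current chunk is misaligned or straddles a page, so
 * allocate an ablkcipher_buffer with enough room for an aligned,
 * non-straddling block, copy the input into it, and queue it for
 * write-back to the real destination on completion.
 */
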
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

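/*
 * The IV supplied by the caller is not sufficiently aligned for this
 * transform, so duplicate it into a freshly allocated buffer at the
 * required alignment; walk->iv then points at the aligned copy.
 */
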
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

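/*
 * Set up the next step of the walk: clamp the step to what is
 * contiguous in both scatterlists and, depending on alignment, take
 * either the fast path (map the pages in place) or the slow
 * bounce-buffer path above.
 */
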
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

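/*
 * The caller's key buffer is not aligned as required by the algorithm,
 * so copy it into an aligned temporary buffer before calling the
 * algorithm's setkey, then wipe and free the copy.
 */
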
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

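/*
 * The givcipher variant mirrors the plain ablkcipher setup; the only
 * difference is that algorithms flagged CRYPTO_ALG_GENIV supply their
 * own setkey instead of the generic one above.
 */
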
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);