crypto/ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

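/*
 * A queued slow-path write: @data holds @len bytes of output that still
 * need to be copied to the @dst position in the caller's scatterlist.
 * __ablkcipher_walk_complete() flushes and frees these entries once the
 * asynchronous request has finished.
 */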
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
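 *
 * If [start, start + len) would cross a page boundary, the start of the
 * following page is returned so that the chunk fits in a single page;
 * otherwise start is returned unchanged.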
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
        }

        return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

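/*
 * Complete the current step of the walk.  A non-negative @err is the
 * number of source bytes the cipher left unprocessed in this step; the
 * slow path requires a step to be either fully processed or failed.  If
 * data remains, the walk advances to the next chunk; otherwise the
 * (possibly bounced) IV is copied back and the walk's resources are
 * released.
 */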
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

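/*
 * Slow path: the current chunk is misaligned or straddles a scatterlist
 * entry.  Bounce it through a freshly allocated buffer that is aligned
 * and does not cross a page boundary; the output is copied back to the
 * real destination by __ablkcipher_walk_complete().
 */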
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

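/*
 * The IV supplied by the caller is not sufficiently aligned for this
 * cipher, so copy it into a buffer carved out of a fresh allocation at
 * the required alignment.
 */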
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

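/*
 * Bounce an unaligned key through a temporary aligned buffer, and wipe
 * the copy before freeing it.
 */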
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;
        crt->has_setkey = alg->max_keysize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                     u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->givencrypt = alg->givencrypt;
        crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;
        crt->has_setkey = alg->max_keysize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
        .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

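/*
 * Pick the default IV generator for a cipher: "eseqiv" when the IV size
 * matches the block size (the usual CBC-style case), "chainiv" otherwise.
 */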
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
        if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                         alg->cra_ablkcipher.ivsize) !=
            alg->cra_blocksize)
                return "chainiv";

        return "eseqiv";
}

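/*
 * Construct a default IV-generating wrapper (e.g. "eseqiv(cbc(aes))")
 * around @alg by instantiating the geniv template with the exact driver
 * name.  On success -EAGAIN is returned so that the caller redoes the
 * lookup and finds the freshly registered instance.
 */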
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
        struct rtattr *tb[3];
        struct {
                struct rtattr attr;
                struct crypto_attr_type data;
        } ptype;
        struct {
                struct rtattr attr;
                struct crypto_attr_alg data;
        } palg;
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        struct crypto_alg *larval;
        const char *geniv;
        int err;

        larval = crypto_larval_lookup(alg->cra_driver_name,
                                      (type & ~CRYPTO_ALG_TYPE_MASK) |
                                      CRYPTO_ALG_TYPE_GIVCIPHER,
                                      mask | CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(larval);
        if (IS_ERR(larval))
                goto out;

        err = -EAGAIN;
        if (!crypto_is_larval(larval))
                goto drop_larval;

        ptype.attr.rta_len = sizeof(ptype);
        ptype.attr.rta_type = CRYPTOA_TYPE;
        ptype.data.type = type | CRYPTO_ALG_GENIV;
        /* GENIV tells the template that we're making a default geniv. */
        ptype.data.mask = mask | CRYPTO_ALG_GENIV;
        tb[0] = &ptype.attr;

        palg.attr.rta_len = sizeof(palg);
        palg.attr.rta_type = CRYPTOA_ALG;
        /* Must use the exact name to locate ourselves. */
        memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
        tb[1] = &palg.attr;

        tb[2] = NULL;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER)
                geniv = alg->cra_blkcipher.geniv;
        else
                geniv = alg->cra_ablkcipher.geniv;

        if (!geniv)
                geniv = crypto_default_geniv(alg);

        tmpl = crypto_lookup_template(geniv);
        err = -ENOENT;
        if (!tmpl)
                goto kill_larval;

        inst = tmpl->alloc(tb);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto put_tmpl;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                tmpl->free(inst);
                goto put_tmpl;
        }

        /* Redo the lookup to use the instance we just registered. */
        err = -EAGAIN;

put_tmpl:
        crypto_tmpl_put(tmpl);
kill_larval:
        crypto_larval_kill(larval);
drop_larval:
        crypto_mod_put(larval);
out:
        crypto_mod_put(alg);
        return err;
}

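/*
 * Look up an algorithm usable through the skcipher interface.  Plain
 * (a)blkciphers that need an IV generator are upgraded by building the
 * default givcipher instance first; the resulting -EAGAIN tells the
 * caller to retry the lookup.
 */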
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER)
                return alg;

        if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                          alg->cra_ablkcipher.ivsize))
                return alg;

        crypto_mod_put(alg);
        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
                                    mask & ~CRYPTO_ALG_TESTED);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER) {
                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
                return alg;
        }

        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                             alg->cra_ablkcipher.ivsize));

        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        alg = crypto_lookup_skcipher(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

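/*
 * Allocate an ablkcipher handle for the algorithm @alg_name, retrying
 * while the lookup returns -EAGAIN (e.g. after a default geniv instance
 * has just been registered and the lookup must be redone).
 *
 * A minimal usage sketch (key setup error handling elided):
 *
 *      struct crypto_ablkcipher *tfm;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_ablkcipher_setkey(tfm, key, keylen);
 *      ...
 *      crypto_free_ablkcipher(tfm);
 */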
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_lookup_skcipher(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return __crypto_ablkcipher_cast(tfm);

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);