/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

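/*
 * Rough user-space usage sketch (illustrative only; error handling is
 * elided, and "cbc(aes)" plus the 16-byte key are arbitrary examples):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	char key[16] = { 0 };
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The operation (ALG_SET_OP) and IV (ALG_SET_IV) are then passed as
 * control messages to sendmsg(2); plaintext or ciphertext goes out via
 * sendmsg(2)/sendpage(2) and the result is collected with read(2) or
 * recvmsg(2) on opfd.
 */
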
struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	size_t used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct skcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	atomic_t *inflight;
	struct skcipher_request req;
};

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

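/*
 * Free both sides of a completed async request: every rx SGL hanging
 * off sreq->list (all but the embedded first_sgl were kmalloc'ed) and
 * the pages still referenced from the flattened tx scatterlist.
 */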
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i) {
		struct page *page = sg_page(sg);

		/* some SGs may not have a page mapped */
		if (page && page_ref_count(page))
			put_page(page);
	}

	kfree(sreq->tsg);
}

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}

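/*
 * Bytes of send buffer still available for queueing plaintext or
 * ciphertext, based on sk_sndbuf rounded down to a page boundary.
 */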
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

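/*
 * Append a fresh skcipher_sg_list to ctx->tsgl once the current one is
 * full, chaining its scatterlist to the previous table so the whole tx
 * queue stays walkable as one list.
 */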
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg) {
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
			sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
		}

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

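/*
 * Release "used" bytes from the front of the tx queue; "put" says
 * whether the underlying pages should be dropped as well (they must
 * not be when an async request has taken over the pages).
 */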
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

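/* Sleep until the send buffer is writable again or a signal arrives. */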
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

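/*
 * Queue plaintext/ciphertext for a later read: parse the operation and
 * IV from the control messages, then copy the payload into kernel
 * pages, merging into the last partially filled page where possible.
 */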
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

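/*
 * Zero-copy variant of the above: the caller's page is referenced and
 * spliced directly into the tx scatterlist instead of being copied.
 */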
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}

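/*
 * AIO read path: detach the queued tx pages into a standalone request,
 * map the user's rx buffers, and fire the cipher operation without
 * blocking; skcipher_async_cb() completes the iocb later.
 */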
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;
			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);
out:
	return err;
}

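/*
 * Synchronous read path: run the cipher over at most one tx/rx chunk
 * per loop iteration and wait for each operation to finish.
 */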
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_skcipher_encrypt(&ctx->req) :
					crypto_skcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

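/*
 * Used by the _nokey entry points: fail with -ENOKEY until a key has
 * been set on the parent tfm socket; on success take a reference on
 * the parent so subsequent operations may proceed.
 */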
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

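/*
 * Best-effort drain of in-flight async requests at socket teardown,
 * bounded at roughly ten seconds (100 * 100ms).
 */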
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");