/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
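
/*
 * A minimal user-space sketch of driving this interface, assuming the
 * AF_ALG definitions from <linux/if_alg.h>; the key, IV and buffer
 * names below are illustrative only:
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	int opfd;
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 * The operation (ALG_OP_ENCRYPT/ALG_OP_DECRYPT) and the IV are passed
 * as sendmsg() ancillary data (ALG_SET_OP, ALG_SET_IV); plaintext is
 * then written to and ciphertext read back from opfd.
 */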
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};
struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct ablkcipher_request req;
};
struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};
struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	char iv[];
};
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
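
/*
 * Free a request's rx sgls and drop the page references taken over from
 * the tx list.  The first rsgl is embedded in the request itself, so
 * only the later, separately kmalloc'ed entries are kfree'd.
 */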
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}
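
/* Completion callback for the async path: drop the inflight count,
 * release the request's sgls and complete the user's iocb. */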
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	kfree(req);
	aio_complete(iocb, err, err);
}
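
/* Remaining send-buffer budget: sk_sndbuf rounded down to a page
 * boundary (at least one page), minus what is already queued. */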
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}
static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}
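
/* Make sure the tail of ctx->tsgl has room for at least one more entry,
 * chaining on a fresh page-sized scatterlist block when the current one
 * is full (MAX_SGL_ENTS entries plus one chain slot per block). */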
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
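
/* Consume 'used' bytes from the front of the tx list, optionally
 * dropping the page references, and free fully drained blocks. */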
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
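
/*
 * Queue plaintext or ciphertext from user space.  An optional control
 * message selects the operation (encrypt/decrypt) and supplies the IV;
 * the payload is copied into freshly allocated pages on ctx->tsgl,
 * merging into the tail page where possible.
 */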
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		sg_unmark_end(sg + sgl->cur);
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
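
/* Zero-copy variant of sendmsg: link the caller's page directly into
 * the tx scatterlist instead of copying its contents. */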
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}
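
/*
 * Async read path: the tx sgl is handed over from the socket context to
 * a self-contained request (released from the completion callback), the
 * rx side is built from the caller's iovecs, and the cipher call may
 * return -EINPROGRESS, in which case -EIOCBQUEUED is reported back.
 */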
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct ablkcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
	unsigned int reqlen = sizeof(struct skcipher_async_req) +
			      GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
	int err = -ENOMEM;

	lock_sock(sk);
	req = kmalloc(reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	sreq = GET_SREQ(req, ctx);
	sreq->iocb = msg->msg_iocb;
	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
	INIT_LIST_HEAD(&sreq->list);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg)) {
		kfree(req);
		goto unlock;
	}
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
	ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					skcipher_async_cb, sk);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	sg_mark_end(sreq->tsg + txbufs - 1);

	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				     len, sreq->iv);
	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
			 crypto_ablkcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return err;
}
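
/* Synchronous read path: process one tx scatterlist entry at a time,
 * trimming partial cipher blocks unless this is the final chunk, and
 * wait for each cipher call to complete before copying more. */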
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (iov_iter_count(&msg->msg_iter)) {
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		ablkcipher_request_set_crypt(&ctx->req, sg,
					     ctx->rsgl.sg, used,
					     ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_ablkcipher_encrypt(&ctx->req) :
					crypto_ablkcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
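
/* Dispatch to the async path only for genuinely asynchronous iocbs;
 * everything else takes the synchronous path. */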
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_ablkcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_ablkcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_ablkcipher_setkey(private, key, keylen);
}
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ablkcipher_request_set_tfm(&ctx->req, private);
	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};
static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");