/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

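/*
 * Illustrative sketch (not part of this file's build) of how a user-space
 * program might drive this interface.  The "cbc(aes)" algorithm name, the
 * 16-byte key length and the variable names are assumptions chosen only
 * for the example; the socket calls and AF_ALG constants are the standard
 * user-space API.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 *	Select ALG_OP_ENCRYPT and pass the IV as ancillary data
 *	(ALG_SET_OP / ALG_SET_IV cmsgs), then send the plaintext:
 *
 *	sendmsg(opfd, &msg, 0);
 *	read(opfd, ciphertext, sizeof(ciphertext));
 */
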
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct ablkcipher_request req;
};

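/*
 * Cap each transmit scatterlist so that the skcipher_sg_list header plus
 * its entries fit in a single 4096-byte allocation; one slot is kept free
 * for chaining to the next list.
 */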
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

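/*
 * Send-buffer accounting: how much more data the write side may queue
 * before it has to wait, and whether at least one page of that space is
 * currently available.
 */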
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

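/*
 * Append a fresh scatterlist block to ctx->tsgl once the current one is
 * full (or when none exists yet), chaining it to the previous block.
 */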
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

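/*
 * Drop "used" bytes from the front of the transmit queue, releasing pages
 * that have been fully consumed and freeing emptied scatterlist blocks.
 */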
static void skcipher_pull_sgl(struct sock *sk, int used)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;

			put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used);
}

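/*
 * Writer-side flow control: sleep until send-buffer space is available
 * again, and wake anyone waiting on the socket once it is.
 */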
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

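/*
 * Reader-side flow control: sleep until the sender has queued data
 * (ctx->used != 0), and wake anyone waiting once new data is available.
 */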
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

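/*
 * sendmsg() on the operation socket: parse the optional control message
 * (encrypt/decrypt selection and IV), then copy the payload into the
 * transmit scatterlist, merging into the tail page where possible.
 */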
static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
			    struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		sg_unmark_end(sg + sgl->cur);
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

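/*
 * sendpage() on the operation socket: zero-copy variant of the above,
 * linking the caller's page directly into the transmit scatterlist.
 */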
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

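/*
 * recvmsg() on the operation socket: run the cipher over the queued data
 * and copy the result into the caller's iovec.  Partial reads are kept
 * block-size aligned unless this is the final chunk of the request.
 */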
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
			    struct msghdr *msg, size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (iov_iter_count(&msg->msg_iter)) {
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		ablkcipher_request_set_crypt(&ctx->req, sg,
					     ctx->rsgl.sg, used, ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_ablkcipher_encrypt(&ctx->req) :
					crypto_ablkcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_ablkcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_ablkcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_ablkcipher_setkey(private, key, keylen);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

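/*
 * Set up the per-socket context when an operation socket is accepted:
 * allocate the context plus request, zero the IV, and wire up the
 * completion callback and the socket destructor.
 */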
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ablkcipher_request_set_tfm(&ctx->req, private);
	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");