// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendmsg. Filling up the TX
 * SGL does not cause a crypto operation -- the data will only be tracked by
 * the kernel. Upon receipt of one recvmsg call, the caller must provide a
 * buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together with
 * the RX SGL.
 */

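/*
 * For illustration only, not part of the kernel build: a minimal user-space
 * sketch of this interface, assuming the cbc(aes) cipher is available.  The
 * key, IV and plaintext are arbitrary example values and error handling is
 * omitted.  One sendmsg() fills the TX SGL; the following read() supplies the
 * RX buffer and triggers the cipher operation.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "skcipher",
 *			.salg_name   = "cbc(aes)",
 *		};
 *		char key[16] = { 0 };
 *		char pt[16] = "Single block msg";
 *		char ct[16];
 *		char cbuf[CMSG_SPACE(4) + CMSG_SPACE(20)] = { 0 };
 *		struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
 *		struct msghdr msg = {
 *			.msg_control    = cbuf,
 *			.msg_controllen = sizeof(cbuf),
 *			.msg_iov        = &iov,
 *			.msg_iovlen     = 1,
 *		};
 *		struct af_alg_iv *iv;
 *		struct cmsghdr *cmsg;
 *		int tfmfd, opfd;
 *
 *		// Transform socket: select the algorithm and set the key.
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *
 *		// Operation socket: served by the ops defined in this file.
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		// First cmsg selects the operation (encrypt).
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_OP;
 *		cmsg->cmsg_len = CMSG_LEN(4);
 *		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *		// Second cmsg carries the IV (4 byte length + 16 byte IV).
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_IV;
 *		cmsg->cmsg_len = CMSG_LEN(20);
 *		iv = (void *)CMSG_DATA(cmsg);
 *		iv->ivlen = 16;
 *		memset(iv->iv, 0, 16);
 *
 *		sendmsg(opfd, &msg, 0);		// fill the TX SGL
 *		read(opfd, ct, sizeof(ct));	// run the cipher into the RX SGL
 *
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 */
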
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

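/*
 * Save the cipher's internal state after a partial (non-final) operation so
 * that a later recvmsg() can import it and continue where this one stopped.
 */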
static int algif_skcipher_export(struct sock *sk, struct skcipher_request *req)
{
	struct alg_sock *ask = alg_sk(sk);
	struct crypto_skcipher *tfm;
	struct af_alg_ctx *ctx;
	struct alg_sock *pask;
	unsigned statesize;
	struct sock *psk;
	int err;

	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
		return 0;

	ctx = ask->private;
	psk = ask->parent;
	pask = alg_sk(psk);
	tfm = pask->private;

	statesize = crypto_skcipher_statesize(tfm);
	ctx->state = sock_kmalloc(sk, statesize, GFP_ATOMIC);
	if (!ctx->state)
		return -ENOMEM;

	err = crypto_skcipher_export(req, ctx->state);
	if (err) {
		sock_kzfree_s(sk, ctx->state, statesize);
		ctx->state = NULL;
	}

	return err;
}

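/* Completion callback for asynchronous (AIO) cipher requests. */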
static void algif_skcipher_done(void *data, int err)
{
	struct af_alg_async_req *areq = data;
	struct sock *sk = areq->sk;

	if (err)
		goto out;

	err = algif_skcipher_export(sk, &areq->cra_u.skcipher_req);

out:
	af_alg_async_cb(data, err);
}

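/*
 * Handle one recvmsg() invocation: build the RX SGL from the caller's
 * buffers, pull the matching TX data and run the cipher operation.
 */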
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = pask->private;
	unsigned int bs = crypto_skcipher_chunksize(tfm);
	struct af_alg_async_req *areq;
	unsigned cflags = 0;
	int err = 0;
	size_t len = 0;

	if (!ctx->init || (ctx->more && ctx->used < bs)) {
		err = af_alg_wait_for_data(sk, flags, bs);
		if (err)
			return err;
	}

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_skcipher_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
	if (err)
		goto free;

	/*
	 * If more buffers are expected to be processed, process only
	 * full block size buffers.
	 */
	if (ctx->more || len < ctx->used) {
		len -= len % bs;
		cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
	}

	/*
	 * Create a per request TX SGL for this request which tracks the
	 * SG entries from the global TX SGL.
	 */
	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
						 areq->tsgl_entries),
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);

	/* Initialize the crypto operation */
	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
				   areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);

	if (ctx->state) {
		err = crypto_skcipher_import(&areq->cra_u.skcipher_req,
					     ctx->state);
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
		ctx->state = NULL;
		if (err)
			goto free;
		cflags |= CRYPTO_SKCIPHER_REQ_CONT;
	}

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = len;

		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      algif_skcipher_done, areq);
		err = ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
				      &ctx->wait);

		if (!err)
			err = algif_skcipher_export(
				sk, &areq->cra_u.skcipher_req);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : len;
}

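/*
 * recvmsg() entry point: loop over the message and hand each chunk to
 * _skcipher_recvmsg() until it is consumed or an error occurs.
 */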
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _skcipher_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);

	return ret;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	af_alg_poll,
};

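/*
 * Refuse operations until the parent transform has been given a key; once a
 * key is set, drop the nokey references so subsequent calls proceed directly.
 */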
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct crypto_skcipher *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (!atomic_read(&ask->nokey_refcnt))
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	atomic_dec(&pask->nokey_refcnt);
	atomic_set(&ask->nokey_refcnt, 0);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_skcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_skcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_skcipher_setkey(private, key, keylen);
}

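/* Release all resources held by an operation socket. */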
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	if (ctx->state)
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

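/* Allocate and initialize the per-socket context for an operation socket. */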
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct crypto_skcipher *tfm = private;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

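/* Like skcipher_accept_parent_nokey(), but require that a key has been set. */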
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct crypto_skcipher *tfm = private;

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_DESCRIPTION("Userspace interface for skcipher algorithms");
MODULE_LICENSE("GPL");