/*
 * Crypto user configuration API.
 *
 * Copyright (C) 2011 secunet Security Networks AG
 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
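/*
 * Userspace talks to this module over an AF_NETLINK/NETLINK_CRYPTO socket
 * using CRYPTO_MSG_* messages whose payload is a struct crypto_user_alg,
 * optionally followed by CRYPTOCFGA_* attributes.  A minimal sketch of a
 * GETALG query (illustrative only, error handling omitted):
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct crypto_user_alg cru;
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.cru)),
 *		.n.nlmsg_type  = CRYPTO_MSG_GETALG,
 *		.n.nlmsg_flags = NLM_F_REQUEST,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
 *
 *	strcpy(req.cru.cru_driver_name, "aes-generic");
 *	send(fd, &req, req.n.nlmsg_len, 0);
 *
 * The reply is a CRYPTO_MSG_GETALG message carrying the matched algorithm's
 * crypto_user_alg header plus its type-specific report attributes.
 */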
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

#include "internal.h"
#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))
static DEFINE_MUTEX(crypto_cfg_mutex);

/* The crypto netlink socket */
static struct sock *crypto_nlsk;
struct crypto_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};
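/*
 * Find an algorithm on crypto_alg_list whose flags match the requested
 * type/mask.  A non-empty driver name must match cra_driver_name exactly;
 * otherwise, for non-exact lookups, the generic cra_name is compared.
 */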
static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (!match)
			continue;

		alg = q;
		break;
	}

	up_read(&crypto_alg_sem);

	return alg;
}
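/*
 * Type-specific report helpers: each one serialises the parameters of an
 * algorithm into a CRYPTOCFGA_REPORT_* netlink attribute for userspace.
 */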
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	strncpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
		    sizeof(struct crypto_report_cipher), &rcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rcomp;

	strncpy(rcomp.type, "compression", sizeof(rcomp.type));
	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rcomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
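/*
 * Fill the crypto_user_alg header for one algorithm and append its
 * type-specific attributes: larval status, the cra_type report hook if the
 * algorithm provides one, or a legacy cipher/compression report.
 */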
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
	strncpy(ualg->cru_driver_name, alg->cra_driver_name,
		sizeof(ualg->cru_driver_name));
	strncpy(ualg->cru_module_name, module_name(alg->cra_module),
		sizeof(ualg->cru_module_name));

	ualg->cru_type = 0;
	ualg->cru_mask = 0;
	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		strncpy(rl.type, "larval", sizeof(rl.type));
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
			    sizeof(struct crypto_report_larval), &rl))
			goto nla_put_failure;
		goto out;
	}

	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
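/*
 * Wrap crypto_report_one() in a netlink message header addressed to the
 * requesting socket; the message is cancelled if reporting fails.
 */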
static int crypto_report_alg(struct crypto_alg *alg,
			     struct crypto_dump_info *info)
{
	struct sk_buff *in_skb = info->in_skb;
	struct sk_buff *skb = info->out_skb;
	struct nlmsghdr *nlh;
	struct crypto_user_alg *ualg;
	int err = 0;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
	if (!nlh) {
		err = -EMSGSIZE;
		goto out;
	}

	ualg = nlmsg_data(nlh);

	err = crypto_report_one(alg, ualg, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

out:
	return err;
}
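/* CRYPTO_MSG_GETALG without NLM_F_DUMP: report a single, exactly named driver. */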
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct nlattr **attrs)
{
	struct crypto_user_alg *p = nlmsg_data(in_nlh);
	struct crypto_alg *alg;
	struct sk_buff *skb;
	struct crypto_dump_info info;
	int err;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (!p->cru_driver_name[0])
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = in_nlh->nlmsg_seq;
	info.nlmsg_flags = 0;

	err = crypto_report_alg(alg, &info);
	if (err)
		return err;

	return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
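/*
 * Dump callback for CRYPTO_MSG_GETALG with NLM_F_DUMP: walk crypto_alg_list
 * and report every registered algorithm as a multipart message.
 */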
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct crypto_alg *alg;
	struct crypto_dump_info info;
	int err;

	if (cb->args[0])
		goto out;

	cb->args[0] = 1;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
		err = crypto_report_alg(alg, &info);
		if (err)
			goto out_err;
	}

out:
	return skb->len;

out_err:
	return err;
}
static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}
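/*
 * CRYPTO_MSG_UPDATEALG: update the priority of an exactly named driver.
 * Dependent instances are torn down so that later lookups re-resolve
 * against the new priority ordering.
 */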
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_remove_final(&list);

	return 0;
}
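/* CRYPTO_MSG_DELALG: unregister an unused template instance. */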
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We cannot unregister core algorithms such as aes-generic.
	 * We would lose the reference in crypto_alg_list to this algorithm
	 * if we tried to unregister it. Unregistering such an algorithm
	 * without removing the module is not possible, so we restrict
	 * ourselves to crypto instances that are built from templates. */
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		return -EINVAL;

	if (atomic_read(&alg->cra_refcnt) != 1)
		return -EBUSY;

	return crypto_unregister_instance(alg);
}
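/*
 * Lookup helpers for CRYPTO_MSG_NEWALG.  Both retry the lookup while the
 * algorithm is still being instantiated (-EAGAIN from a larval lookup) and
 * bail out with -EINTR if a signal is pending.
 */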
static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
						   u32 mask)
{
	int err;
	struct crypto_alg *alg;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		alg = crypto_lookup_skcipher(name, type, mask);
		if (!IS_ERR(alg))
			return alg;

		err = PTR_ERR(alg);
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
					       u32 mask)
{
	int err;
	struct crypto_alg *alg;

	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_AEAD;
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_MASK;

	for (;;) {
		alg = crypto_lookup_aead(name, type, mask);
		if (!IS_ERR(alg))
			return alg;

		err = PTR_ERR(alg);
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
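/*
 * CRYPTO_MSG_NEWALG: probe for an algorithm that is not registered yet,
 * triggering template instantiation or module loading, and optionally set
 * its priority.
 */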
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (strlen(p->cru_driver_name))
		exact = 1;

	if (priority && !exact)
		return -EINVAL;

	alg = crypto_alg_match(p, exact);
	if (alg)
		return -EEXIST;

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
		break;
	case CRYPTO_ALG_TYPE_GIVCIPHER:
	case CRYPTO_ALG_TYPE_BLKCIPHER:
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
		break;
	default:
		alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	}

	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);

	return 0;
}
#define MSGSIZE(type) sizeof(struct type)

static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};
static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
	[CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32 },
};
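/*
 * Dispatch table: maps each CRYPTO_MSG_* type to its doit handler and, for
 * GETALG, to the dump/done callbacks used with NLM_F_DUMP requests.
 */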
static const struct crypto_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
						       .dump = crypto_dump_report,
						       .done = crypto_dump_report_done},
};
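/*
 * Validate and dispatch one netlink message: GETALG requests with NLM_F_DUMP
 * start a netlink dump sized to the current algorithm list; everything else
 * is attribute-parsed and handed to the matching doit handler.
 */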
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	const struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
		struct crypto_alg *alg;
		u16 dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = dump_alloc,
			};
			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
		}
	}

	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
			  crypto_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}
static int __init crypto_user_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input	= crypto_netlink_rcv,
	};

	crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
	if (!crypto_nlsk)
		return -ENOMEM;

	return 0;
}
static void __exit crypto_user_exit(void)
{
	netlink_kernel_release(crypto_nlsk);
}
module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");