/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 */

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <asm/uaccess.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/in6.h>
#endif

static inline int aead_len(struct xfrm_algo_aead *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
	struct nlattr *rt = attrs[type];
	struct xfrm_algo *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_len(algp))
		return -EINVAL;

	switch (type) {
	case XFRMA_ALG_AUTH:
	case XFRMA_ALG_CRYPT:
	case XFRMA_ALG_COMP:
		break;

	default:
		return -EINVAL;
	}

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_aead(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
	struct xfrm_algo_aead *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < aead_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *rt = attrs[type];

	if (rt && addrp)
		*addrp = nla_data(rt);
}

static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
		return -EINVAL;

	return 0;
}

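/* Sanity-check a new SA request: supported address family, an attribute
 * combination that makes sense for the chosen protocol (AH, ESP, IPcomp,
 * DSTOPTS/ROUTING), well-formed algorithm and security-context attributes,
 * and a known mode.
 */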
static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs)
{
	int err;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		if (!attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ALG_COMP])
			goto out;
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP])
			goto out;
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD])
			goto out;
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD])
			goto out;
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_CRYPT])
			goto out;
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		if (attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ENCAP] ||
		    attrs[XFRMA_SEC_CTX] ||
		    !attrs[XFRMA_COADDR])
			goto out;
		break;
#endif

	default:
		goto out;
	}

	if ((err = verify_aead(attrs)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(char *, int),
			   struct nlattr *rta)
{
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo_aead *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
	int len = 0;

	if (xfrm_ctx) {
		len += sizeof(struct xfrm_user_sec_ctx);
		len += xfrm_ctx->ctx_len;
	}
	return len;
}

static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	x->props.replay_window = p->replay_window;
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}

/*
 * someday when pfkey also has support, we could have the code
 * somehow made shareable and move it to xfrm_state.c - JHS
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);
}

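/* Build a new xfrm_state from the xfrm_usersa_info header and the optional
 * netlink attributes: algorithms, NAT-T encapsulation, care-of address,
 * security context and async-event parameters.
 */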
static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if ((err = attach_aead(&x->aead, &x->props.ealgo,
			       attrs[XFRMA_ALG_AEAD])))
		goto error;
	if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
				   xfrm_aalg_get_byname,
				   attrs[XFRMA_ALG_AUTH])))
		goto error;
	if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
				   xfrm_ealg_get_byname,
				   attrs[XFRMA_ALG_CRYPT])))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP])))
		goto error;

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX] &&
	    security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime * HZ) / XFRM_AE_ETH_M;
	x->preplay.bitmap = 0;
	x->preplay.seq = x->replay.seq + x->replay_maxdiff;
	x->preplay.oseq = x->replay.oseq + x->replay_maxdiff;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs);

	return x;

error:
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}

static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;
	uid_t loginuid = NETLINK_CB(skb).loginuid;
	u32 sessionid = NETLINK_CB(skb).sessionid;
	u32 sid = NETLINK_CB(skb).sid;

	err = verify_newsa_info(p, attrs);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err);
	if (!x)
		return err;

	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
		goto out;
	}

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}

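/* Look up the SA referenced by an xfrm_usersa_id: by (daddr, spi, proto)
 * when the protocol passes the IPSEC_PROTO_ANY match, otherwise by address
 * pair using the XFRMA_SRCADDR attribute.
 */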
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	int err;

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, &p->daddr, saddr,
					     p->proto, p->family);
	}

out:
	if (!x && errp)
		*errp = err;
	return x;
}

static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	uid_t loginuid = NETLINK_CB(skb).loginuid;
	u32 sessionid = NETLINK_CB(skb).sessionid;
	u32 sid = NETLINK_CB(skb).sid;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	if (xfrm_state_kern(x)) {
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);

	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
	xfrm_state_put(x);
	return err;
}

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	memcpy(&p->stats, &x->stats, sizeof(p->stats));
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}

struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}

/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	copy_to_user_state(x, p);

	if (x->coaddr)
		NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);

	if (x->lastused)
		NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);

	if (x->aead)
		NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
	if (x->aalg)
		NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg);
	if (x->ealg)
		NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
	if (x->calg)
		NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);

	if (x->encap)
		NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

	if (x->security && copy_sec_ctx(x->security, skb) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err;
}

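/* SA dumping keeps its xfrm_state_walk cursor in cb->args[1..]; args[0]
 * records whether the walk has been initialised yet.
 */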
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	xfrm_state_walk_done(walk);
	return 0;
}

static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_state_walk_init(walk, 0);
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	if (dump_one_state(x, 0, &info)) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static inline size_t xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo));
}

static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct nlmsghdr *nlh;
	u32 *f;

	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(&si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 spid = NETLINK_CB(skb).pid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}

static inline size_t xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	u32 *f;

	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(&si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
	NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 spid = NETLINK_CB(skb).pid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}

static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
	}
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_userspi_info(struct xfrm_userspi_info *p)
{
	switch (p->info.id.proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP spi is 16-bits. */
		if (p->max >= 0x10000)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	if (p->min > p->max)
		return -EINVAL;

	return 0;
}

static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;

	p = nlmsg_data(nlh);
	err = verify_userspi_info(p);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, p->info.seq);
		if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, p->info.mode, p->info.reqid,
				  p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	err = xfrm_alloc_spi(x, p->min, p->max);
	if (err)
		goto out;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_policy_dir(u8 dir)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_policy_type(u8 type)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		break;

	case AF_INET6:
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		break;
#else
		return -EAFNOSUPPORT;
#endif

	default:
		return -EINVAL;
	}

	return verify_policy_dir(p->dir);
}

static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	return security_xfrm_policy_alloc(&pol->security, uctx);
}

static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
{
	int i;

	xp->xfrm_nr = nr;
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;
		t->mode = ut->mode;
		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
		/* If all masks are ~0, then we allow all algorithms. */
		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
		t->encap_family = ut->family;
	}
}

static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
	int i;

	if (nr > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero. The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself. Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		case AF_INET6:
			break;
#endif
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}

	return 0;
}

static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type);
	if (err)
		return err;

	*tp = type;
	return 0;
}

static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

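/* Allocate and fill an xfrm_policy from the userpolicy_info header plus the
 * optional policy-type, template and security-context attributes.
 */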
static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	return xp;
error:
	*errp = err;
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;
	uid_t loginuid = NETLINK_CB(skb).loginuid;
	u32 sessionid = NETLINK_CB(skb).sessionid;
	u32 sid = NETLINK_CB(skb).sid;

	err = verify_newpolicy_info(p);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);

	if (err) {
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security) {
		return copy_sec_ctx(xp->security, skb);
	}
	return 0;
}

static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt = {
		.type = type,
	};

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	nlmsg_end(skb, nlh);
	return 0;

nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];

	xfrm_policy_walk_done(walk);
	return 0;
}

static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	}

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}

static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	if (dump_one_policy(xp, dir, 0, &info) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, type, p->dir, p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx,
					   delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).pid);
		}
	} else {
		uid_t loginuid = NETLINK_CB(skb).loginuid;
		u32 sessionid = NETLINK_CB(skb).sessionid;
		u32 sid = NETLINK_CB(skb).sid;

		xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
					 sid);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.pid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}

static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	struct xfrm_audit audit_info;
	int err;

	audit_info.loginuid = NETLINK_CB(skb).loginuid;
	audit_info.sessionid = NETLINK_CB(skb).sessionid;
	audit_info.secid = NETLINK_CB(skb).sid;
	err = xfrm_state_flush(net, p->proto, &audit_info);
	if (err)
		return err;
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}

static inline size_t xfrm_aevent_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(sizeof(struct xfrm_replay_state))
	       + nla_total_size(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4); /* XFRM_AE_ETHR */
}

static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
	NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);

	if (id->flags & XFRM_AE_RTHR)
		NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);

	if (id->flags & XFRM_AE_ETHR)
		NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
			    x->replay_maxage * 10 / HZ);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	x = xfrm_state_lookup(net, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL) {
		kfree_skb(r_skb);
		return -ESRCH;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;

	if (build_aevent(r_skb, x, &c) < 0)
		BUG();
	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];

	if (!lt && !rp)
		return err;

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
		return err;

	x = xfrm_state_lookup(net, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}

static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct xfrm_audit audit_info;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	audit_info.loginuid = NETLINK_CB(skb).loginuid;
	audit_info.sessionid = NETLINK_CB(skb).sessionid;
	audit_info.secid = NETLINK_CB(skb).sid;
	err = xfrm_policy_flush(net, type, &audit_info);
	if (err)
		return err;
	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}

static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, type, p->dir, p->index, 0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	read_lock(&xp->lock);
	if (xp->walk.dead) {
		read_unlock(&xp->lock);
		goto out;
	}

	read_unlock(&xp->lock);
	err = 0;
	if (up->hard) {
		uid_t loginuid = NETLINK_CB(skb).loginuid;
		uid_t sessionid = NETLINK_CB(skb).sessionid;
		u32 sid = NETLINK_CB(skb).sid;
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);

	} else {
		// reset the timers here?
		printk("Don't know what to do with soft policy expire\n");
	}
	km_policy_expired(xp, p->dir, up->hard, current->pid);

out:
	xfrm_pol_put(xp);
	return err;
}

static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;

	x = xfrm_state_lookup(net, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID)
		goto out;
	km_state_expired(x, ue->hard, current->pid);

	if (ue->hard) {
		uid_t loginuid = NETLINK_CB(skb).loginuid;
		uid_t sessionid = NETLINK_CB(skb).sessionid;
		u32 sid = NETLINK_CB(skb).sid;
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	err = verify_newpolicy_info(&ua->policy);
	if (err)
		goto bad_policy;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));

	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);
	}

	kfree(x);
	kfree(xp);

	return 0;

bad_policy:
	printk("BAD policy passed\n");
free_state:
	kfree(x);
nomem:
	return err;
}

#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}

static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;

	if (attrs[XFRMA_MIGRATE] == NULL)
		return -EINVAL;

	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
	if (err)
		return err;

	if (!n)
		return 0;

	xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);

	return 0;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	return -ENOPROTOOPT;
}
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}

static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	       + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	       + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	       + userpolicy_type_attrsize();
}

static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
			 int num_migrate, struct xfrm_kmaddress *k,
			 struct xfrm_selector *sel, u8 dir, u8 type)
{
	struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
		goto nlmsg_failure;

	if (copy_to_user_policy_type(type, skb) < 0)
		goto nlmsg_failure;

	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		if (copy_to_user_migrate(mp, skb) < 0)
			goto nlmsg_failure;
	}

	return nlmsg_end(skb, nlh);
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
			     struct xfrm_migrate *m, int num_migrate,
			     struct xfrm_kmaddress *k)
{
	struct net *net = &init_net;
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
			     struct xfrm_migrate *m, int num_migrate,
			     struct xfrm_kmaddress *k)
{
	return -ENOPROTOOPT;
}
#endif

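/* Minimum payload length for each user message type, used by
 * xfrm_user_rcv_msg() when parsing attributes.
 */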
#define XMSGSIZE(type) sizeof(struct type)

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

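/* Netlink attribute validation policy for the XFRMA_* attributes. */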
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
};

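/* Dispatch table mapping XFRM_MSG_* types to their doit/dump/done handlers. */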
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa,
						   .done = xfrm_dump_sa_done  },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .dump = xfrm_dump_policy,
						   .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire   },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae        },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae        },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
};

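/* Handle one netlink message: check privileges, start a dump for GETSA and
 * GETPOLICY requests carrying NLM_F_DUMP, otherwise parse the attributes and
 * invoke the per-type doit handler from xfrm_dispatch.
 */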
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done);
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&xfrm_cfg_mutex);
}

static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;

	return nlmsg_end(skb, nlh);
}

static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

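/* Attribute space needed by copy_to_user_state_extra(); keep the two in sync. */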
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg)
		l += nla_total_size(xfrm_alg_len(x->aalg));
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size(sizeof(u64));

	return l;
}

static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = xfrm_sa_len(x);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nla_put_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		if (attr == NULL)
			goto nla_put_failure;

		p = nla_data(attr);
	}

	if (copy_to_user_state_extra(x, p, skb))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

nla_put_failure:
	/* Somebody screwed up with xfrm_sa_len! */
	WARN_ON(1);
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk("xfrm_user: Unknown SA event %d\n", c->event);
		break;
	}

	return 0;
}

2219 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2220 struct xfrm_policy *xp)
2222 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2223 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2224 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2225 + userpolicy_type_attrsize();
2228 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2229 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2230 int dir)
2232 struct xfrm_user_acquire *ua;
2233 struct nlmsghdr *nlh;
2234 __u32 seq = xfrm_get_acqseq();
2236 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2237 if (nlh == NULL)
2238 return -EMSGSIZE;
2240 ua = nlmsg_data(nlh);
2241 memcpy(&ua->id, &x->id, sizeof(ua->id));
2242 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2243 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2244 copy_to_user_policy(xp, &ua->policy, dir);
2245 ua->aalgos = xt->aalgos;
2246 ua->ealgos = xt->ealgos;
2247 ua->calgos = xt->calgos;
2248 ua->seq = x->km.seq = seq;
2250 if (copy_to_user_tmpl(xp, skb) < 0)
2251 goto nlmsg_failure;
2252 if (copy_to_user_state_sec_ctx(x, skb))
2253 goto nlmsg_failure;
2254 if (copy_to_user_policy_type(xp->type, skb) < 0)
2255 goto nlmsg_failure;
2257 return nlmsg_end(skb, nlh);
2259 nlmsg_failure:
2260 nlmsg_cancel(skb, nlh);
2261 return -EMSGSIZE;
2262 }
2264 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2265 struct xfrm_policy *xp, int dir)
2266 {
2267 struct net *net = xs_net(x);
2268 struct sk_buff *skb;
2270 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2271 if (skb == NULL)
2272 return -ENOMEM;
2274 if (build_acquire(skb, x, xt, xp, dir) < 0)
2275 BUG();
2277 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2278 }
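/*
 * Usage note: XFRM_MSG_ACQUIRE is how the kernel asks a key manager for keys
 * when a policy matches but no SA exists yet.  An IKE daemon subscribed to
 * XFRMNLGRP_ACQUIRE is expected to negotiate and install the SA with
 * XFRM_MSG_NEWSA; the sequence number stamped into x->km.seq above lets the
 * larval state be found again later (see xfrm_find_acq_byseq()).
 */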
2280 /* User gives us xfrm_user_policy_info followed by an array of 0
2281 * or more templates.
2282 */
2283 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2284 u8 *data, int len, int *dir)
2285 {
2286 struct net *net = sock_net(sk);
2287 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2288 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2289 struct xfrm_policy *xp;
2290 int nr;
2292 switch (sk->sk_family) {
2293 case AF_INET:
2294 if (opt != IP_XFRM_POLICY) {
2295 *dir = -EOPNOTSUPP;
2296 return NULL;
2297 }
2298 break;
2299 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2300 case AF_INET6:
2301 if (opt != IPV6_XFRM_POLICY) {
2302 *dir = -EOPNOTSUPP;
2303 return NULL;
2304 }
2305 break;
2306 #endif
2307 default:
2308 *dir = -EINVAL;
2309 return NULL;
2310 }
2312 *dir = -EINVAL;
2314 if (len < sizeof(*p) ||
2315 verify_newpolicy_info(p))
2316 return NULL;
2318 nr = ((len - sizeof(*p)) / sizeof(*ut));
2319 if (validate_tmpl(nr, ut, p->sel.family))
2320 return NULL;
2322 if (p->dir > XFRM_POLICY_OUT)
2323 return NULL;
2325 xp = xfrm_policy_alloc(net, GFP_KERNEL);
2326 if (xp == NULL) {
2327 *dir = -ENOBUFS;
2328 return NULL;
2329 }
2331 copy_from_user_policy(xp, p);
2332 xp->type = XFRM_POLICY_TYPE_MAIN;
2333 copy_templates(xp, ut, nr);
2335 *dir = p->dir;
2337 return xp;
2338 }
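/*
 * Minimal userspace sketch of the per-socket policy path that ends up here,
 * assuming IPv4 and no templates (field values are illustrative only):
 *
 *	struct xfrm_userpolicy_info pol = {
 *		.action = XFRM_POLICY_ALLOW,
 *		.dir    = XFRM_POLICY_OUT,
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
 *
 * Any templates simply follow the xfrm_userpolicy_info in the same option
 * buffer, which is why nr is derived from the residual length above.
 */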
2340 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2341 {
2342 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2343 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2344 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2345 + userpolicy_type_attrsize();
2346 }
2348 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2349 int dir, struct km_event *c)
2350 {
2351 struct xfrm_user_polexpire *upe;
2352 struct nlmsghdr *nlh;
2353 int hard = c->data.hard;
2355 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2356 if (nlh == NULL)
2357 return -EMSGSIZE;
2359 upe = nlmsg_data(nlh);
2360 copy_to_user_policy(xp, &upe->pol, dir);
2361 if (copy_to_user_tmpl(xp, skb) < 0)
2362 goto nlmsg_failure;
2363 if (copy_to_user_sec_ctx(xp, skb))
2364 goto nlmsg_failure;
2365 if (copy_to_user_policy_type(xp->type, skb) < 0)
2366 goto nlmsg_failure;
2367 upe->hard = !!hard;
2369 return nlmsg_end(skb, nlh);
2371 nlmsg_failure:
2372 nlmsg_cancel(skb, nlh);
2373 return -EMSGSIZE;
2374 }
2376 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2377 {
2378 struct net *net = xp_net(xp);
2379 struct sk_buff *skb;
2381 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2382 if (skb == NULL)
2383 return -ENOMEM;
2385 if (build_polexpire(skb, xp, dir, c) < 0)
2386 BUG();
2388 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2389 }
2391 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2392 {
2393 struct net *net = xp_net(xp);
2394 struct xfrm_userpolicy_info *p;
2395 struct xfrm_userpolicy_id *id;
2396 struct nlmsghdr *nlh;
2397 struct sk_buff *skb;
2398 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2399 int headlen;
2401 headlen = sizeof(*p);
2402 if (c->event == XFRM_MSG_DELPOLICY) {
2403 len += nla_total_size(headlen);
2404 headlen = sizeof(*id);
2405 }
2406 len += userpolicy_type_attrsize();
2407 len += NLMSG_ALIGN(headlen);
2409 skb = nlmsg_new(len, GFP_ATOMIC);
2410 if (skb == NULL)
2411 return -ENOMEM;
2413 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2414 if (nlh == NULL)
2415 goto nlmsg_failure;
2417 p = nlmsg_data(nlh);
2418 if (c->event == XFRM_MSG_DELPOLICY) {
2419 struct nlattr *attr;
2421 id = nlmsg_data(nlh);
2422 memset(id, 0, sizeof(*id));
2423 id->dir = dir;
2424 if (c->data.byid)
2425 id->index = xp->index;
2426 else
2427 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2429 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2430 if (attr == NULL)
2431 goto nlmsg_failure;
2433 p = nla_data(attr);
2434 }
2436 copy_to_user_policy(xp, p, dir);
2437 if (copy_to_user_tmpl(xp, skb) < 0)
2438 goto nlmsg_failure;
2439 if (copy_to_user_policy_type(xp->type, skb) < 0)
2440 goto nlmsg_failure;
2442 nlmsg_end(skb, nlh);
2444 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2446 nlmsg_failure:
2447 kfree_skb(skb);
2448 return -1;
2449 }
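/*
 * Layout note: as with SA notifications, XFRM_MSG_DELPOLICY swaps the header
 * for the compact xfrm_userpolicy_id and carries the full
 * xfrm_userpolicy_info in a nested XFRMA_POLICY attribute, which is why len
 * reserves an extra nla_total_size(headlen) in that case.
 */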
2451 static int xfrm_notify_policy_flush(struct km_event *c)
2452 {
2453 struct net *net = c->net;
2454 struct nlmsghdr *nlh;
2455 struct sk_buff *skb;
2457 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2458 if (skb == NULL)
2459 return -ENOMEM;
2461 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2462 if (nlh == NULL)
2463 goto nlmsg_failure;
2464 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2465 goto nlmsg_failure;
2467 nlmsg_end(skb, nlh);
2469 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2471 nlmsg_failure:
2472 kfree_skb(skb);
2473 return -1;
2474 }
2476 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2477 {
2479 switch (c->event) {
2480 case XFRM_MSG_NEWPOLICY:
2481 case XFRM_MSG_UPDPOLICY:
2482 case XFRM_MSG_DELPOLICY:
2483 return xfrm_notify_policy(xp, dir, c);
2484 case XFRM_MSG_FLUSHPOLICY:
2485 return xfrm_notify_policy_flush(c);
2486 case XFRM_MSG_POLEXPIRE:
2487 return xfrm_exp_policy_notify(xp, dir, c);
2488 default:
2489 printk("xfrm_user: Unknown Policy event %d\n", c->event);
2490 }
2492 return 0;
2493 }
2496 static inline size_t xfrm_report_msgsize(void)
2497 {
2498 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2499 }
2501 static int build_report(struct sk_buff *skb, u8 proto,
2502 struct xfrm_selector *sel, xfrm_address_t *addr)
2503 {
2504 struct xfrm_user_report *ur;
2505 struct nlmsghdr *nlh;
2507 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2508 if (nlh == NULL)
2509 return -EMSGSIZE;
2511 ur = nlmsg_data(nlh);
2512 ur->proto = proto;
2513 memcpy(&ur->sel, sel, sizeof(ur->sel));
2515 if (addr)
2516 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2518 return nlmsg_end(skb, nlh);
2520 nla_put_failure:
2521 nlmsg_cancel(skb, nlh);
2522 return -EMSGSIZE;
2523 }
2525 static int xfrm_send_report(struct net *net, u8 proto,
2526 struct xfrm_selector *sel, xfrm_address_t *addr)
2527 {
2528 struct sk_buff *skb;
2530 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2531 if (skb == NULL)
2532 return -ENOMEM;
2534 if (build_report(skb, proto, sel, addr) < 0)
2535 BUG();
2537 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2538 }
2540 static inline size_t xfrm_mapping_msgsize(void)
2541 {
2542 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2543 }
2545 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2546 xfrm_address_t *new_saddr, __be16 new_sport)
2547 {
2548 struct xfrm_user_mapping *um;
2549 struct nlmsghdr *nlh;
2551 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2552 if (nlh == NULL)
2553 return -EMSGSIZE;
2555 um = nlmsg_data(nlh);
2557 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2558 um->id.spi = x->id.spi;
2559 um->id.family = x->props.family;
2560 um->id.proto = x->id.proto;
2561 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2562 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2563 um->new_sport = new_sport;
2564 um->old_sport = x->encap->encap_sport;
2565 um->reqid = x->props.reqid;
2567 return nlmsg_end(skb, nlh);
2568 }
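/*
 * Usage note: ->new_mapping fires from the UDP-encapsulated ESP receive path
 * when a peer behind NAT turns up with a changed address or port, so the
 * XFRM_MSG_MAPPING event lets an IKE daemon refresh its view of the peer's
 * NAT mapping; hence the IPPROTO_ESP and x->encap checks in
 * xfrm_send_mapping() below.
 */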
2570 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2571 __be16 sport)
2572 {
2573 struct net *net = xs_net(x);
2574 struct sk_buff *skb;
2576 if (x->id.proto != IPPROTO_ESP)
2577 return -EINVAL;
2579 if (!x->encap)
2580 return -EINVAL;
2582 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2583 if (skb == NULL)
2584 return -ENOMEM;
2586 if (build_mapping(skb, x, ipaddr, sport) < 0)
2587 BUG();
2589 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2590 }
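/*
 * netlink_mgr plugs the handlers above into the generic key-manager (km)
 * layer: the xfrm core reaches them through helpers such as
 * km_state_notify(), km_query() and km_policy_notify(), keeping this file
 * the single place where kernel events become NETLINK_XFRM messages.
 */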
2592 static struct xfrm_mgr netlink_mgr = {
2593 .id = "netlink",
2594 .notify = xfrm_send_state_notify,
2595 .acquire = xfrm_send_acquire,
2596 .compile_policy = xfrm_compile_policy,
2597 .notify_policy = xfrm_send_policy_notify,
2598 .report = xfrm_send_report,
2599 .migrate = xfrm_send_migrate,
2600 .new_mapping = xfrm_send_mapping,
2601 };
2603 static int __net_init xfrm_user_net_init(struct net *net)
2604 {
2605 struct sock *nlsk;
2607 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
2608 xfrm_netlink_rcv, NULL, THIS_MODULE);
2609 if (nlsk == NULL)
2610 return -ENOMEM;
2611 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
2612 return 0;
2613 }
2615 static void __net_exit xfrm_user_net_exit(struct net *net)
2616 {
2617 struct sock *nlsk = net->xfrm.nlsk;
2619 rcu_assign_pointer(net->xfrm.nlsk, NULL);
2620 synchronize_rcu();
2621 netlink_kernel_release(nlsk);
2622 }
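/*
 * Teardown note: the per-net socket pointer is cleared first and
 * synchronize_rcu() waits out any notifier that may have picked up the old
 * value before netlink_kernel_release() actually frees the socket.
 */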
2624 static struct pernet_operations xfrm_user_net_ops = {
2625 .init = xfrm_user_net_init,
2626 .exit = xfrm_user_net_exit,
2627 };
2629 static int __init xfrm_user_init(void)
2630 {
2631 int rv;
2633 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2635 rv = register_pernet_subsys(&xfrm_user_net_ops);
2636 if (rv < 0)
2637 return rv;
2638 rv = xfrm_register_km(&netlink_mgr);
2639 if (rv < 0)
2640 unregister_pernet_subsys(&xfrm_user_net_ops);
2641 return rv;
2642 }
2644 static void __exit xfrm_user_exit(void)
2645 {
2646 xfrm_unregister_km(&netlink_mgr);
2647 unregister_pernet_subsys(&xfrm_user_net_ops);
2648 }
2650 module_init(xfrm_user_init);
2651 module_exit(xfrm_user_exit);
2652 MODULE_LICENSE("GPL");
2653 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
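/*
 * Loading note: MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM) resolves
 * to "net-pf-16-proto-6", so opening a NETLINK_XFRM socket can auto-load this
 * module and tools such as "ip xfrm state" work without an explicit modprobe.
 */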