/*
 * xfrm_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"
#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
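
/*
 * Illustrative usage sketch (not part of this file): matching a
 * hand-built IPv4 flow against a selector.  The helper name and all of
 * the addresses below are hypothetical.
 */
static inline bool example_match_tcp_flow(const struct xfrm_selector *sel)
{
	struct flowi fl = {};

	fl.u.ip4.daddr = htonl(0xc0a80001);	/* 192.168.0.1, hypothetical */
	fl.u.ip4.saddr = htonl(0xc0a80002);	/* 192.168.0.2, hypothetical */
	fl.u.ip4.flowi4_proto = IPPROTO_TCP;

	/* AF_INET dispatches to __xfrm4_selector_match() above */
	return xfrm_selector_match(sel, &fl, AF_INET);
}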
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	rcu_read_unlock();
}
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
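
/*
 * Worked example (illustrative, assuming HZ == 1000): make_jiffies(5)
 * yields 5000 jiffies, while any secs at or above
 * (MAX_SCHEDULE_TIMEOUT - 1) / HZ is clamped to MAX_SCHEDULE_TIMEOUT - 1,
 * so the jiffies arithmetic later handed to mod_timer() cannot overflow.
 */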
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
				(unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
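
/*
 * Minimal usage sketch (not part of this file): a key manager such as
 * pfkeyv2 would allocate a policy, fill in the selector and templates,
 * and hand it to xfrm_policy_insert().  The function name below is
 * hypothetical and error unwinding is elided.
 */
static int example_install_policy(struct net *net)
{
	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);

	if (!pol)
		return -ENOMEM;
	/* ... fill pol->selector, pol->action, pol->xfrm_vec ... */
	return xfrm_policy_insert(XFRM_POLICY_OUT, pol, 0 /* excl */);
}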
/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
static void xfrm_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	xfrm_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}
static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(&pol->bydst);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(&pol->bydst);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
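
/*
 * Worked example (illustrative): an old mask of 15 (16 buckets) grows to
 * ((15 + 1) << 1) - 1 = 31, i.e. the table doubles to 32 buckets and the
 * new mask keeps the low five bits of the hash.
 */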
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
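
/*
 * Usage sketch (not part of this file): a caller snapshots the SPD
 * counters into a struct xfrmk_spdinfo, e.g. for a netlink reply.  The
 * function name is hypothetical.
 */
static void example_report_spd(struct net *net)
{
	struct xfrmk_spdinfo si;

	xfrm_spd_getinfo(net, &si);
	/* si.incnt, si.outcnt and si.fwdcnt now hold per-direction counts */
}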
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	if (skb_queue_empty(&list))
		return;

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

	again1:
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
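
/*
 * Usage sketch (not part of this file): the three-call walker API used
 * to enumerate every policy.  The callback and its use of 'data' are
 * hypothetical; a non-zero return from the callback stops the walk.
 */
static int example_dump_one(struct xfrm_policy *pol, int dir, int seq, void *data)
{
	return 0;	/* inspect pol, dir and seq here */
}

static int example_dump_all(struct net *net)
{
	struct xfrm_policy_walk walk;
	int err;

	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
	err = xfrm_policy_walk(net, &walk, example_dump_one, NULL);
	xfrm_policy_walk_done(&walk);
	return err;
}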
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			ret = ERR_PTR(err);
			goto fail;
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			ret = ERR_PTR(err);
			goto fail;
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}
static int flow_to_policy_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;

	switch (dir) {
	default:
	case FLOW_DIR_IN:
		return XFRM_POLICY_IN;
	case FLOW_DIR_OUT:
		return XFRM_POLICY_OUT;
	case FLOW_DIR_FWD:
		return XFRM_POLICY_FWD;
	}
}
static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;

	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
						 sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol)
		xfrm_policy_kill(old_pol);
	return 0;
}
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}
static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}
static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build bundle as template resolution failed.
		 * It means we need to try again resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else if (dst->flags & DST_XFRM_QUEUE) {
		return NULL;
	} else {
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
		if (afinfo->init_dst)
			afinfo->init_dst(net, xdst);
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}
static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}
static int
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}

	memcpy(*target, src, size);
	return 0;
}
static int
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
static void xfrm_policy_queue_process(unsigned long arg)
{
	int err = 0;
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(dst->path);
	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
			  sk, 0);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(skb_dst(skb)->path);
		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
				  &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		err = dst_output(skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	xfrm_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
static int xdst_queue_output(struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct dst_entry *dst,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
	    (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
		return xdst;

	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	dst1->child = dst;
	dst1->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family,
					       flow_to_policy_dir(dir));
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
	dst = NULL;
	xdst = NULL;
	route = NULL;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			dst_hold(&xdst->u.dst);

			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
			xdst->u.dst.next = xfrm_policy_sk_bundles;
			xfrm_policy_sk_bundles = &xdst->u.dst;
			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, dst_orig);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			dst_release(dst);
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		}
		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&net->xfrm.km_waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&net->xfrm.km_waitq, &wait);

			if (!signal_pending(current)) {
				dst_release(dst);
				goto restart;
			}

			err = -ERESTART;
		} else
			err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
static int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * 0 or more than 0 is returned when validation succeeds (either bypass
 * because of optional transport mode, or next index of the matched secpath
 * state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
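
/*
 * Illustrative sketch (not part of this file): input-path callers
 * normally reach this machinery through the xfrm_policy_check() wrapper
 * from net/xfrm.h, which decodes the session and then runs
 * __xfrm_policy_check() below.  The direction and family here are just
 * an example.
 */
static inline int example_check_input(struct sock *sk, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, XFRM_POLICY_IN, skb, AF_INET);
}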
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
static void __xfrm_garbage_collect(struct net *net)
{
	struct dst_entry *head, *next;

	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
	head = xfrm_policy_sk_bundles;
	xfrm_policy_sk_bundles = NULL;
	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

	while (head) {
		next = head->next;
		dst_free(head);
		head = next;
	}
}

void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush();
	__xfrm_garbage_collect(net);
}
EXPORT_SYMBOL(xfrm_garbage_collect);

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred();
	__xfrm_garbage_collect(net);
}
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}
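
/*
 * Worked example (illustrative): if the child dst reports an MTU of
 * 1500, xfrm_state_mtu() shrinks it for ESP overhead to, say, 1438, and
 * the route caches 1400, the bundle entry stores min(1438, 1400) = 1400
 * as its RTAX_MTU.
 */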
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

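/* Each address family supplies one xfrm_policy_afinfo at module init and
 * only fills in the dst_ops hooks it needs; anything left NULL picks up
 * the generic xfrm implementations above. A minimal sketch of a caller
 * (field values are illustrative, not taken from this file):
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 */
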
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
					 NULL);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);
	if (!err) {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		synchronize_rcu();

		dst_ops->kmem_cachep = NULL;
		dst_ops->check = NULL;
		dst_ops->negative_advice = NULL;
		dst_ops->link_failure = NULL;
		afinfo->garbage_collect = NULL;
	}
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

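/* Init ordering matters here: statistics, state and policy tables come up
 * per-namespace through xfrm_net_ops, while only init_net creates the
 * xfrm_dst kmem cache and registers the netdev notifier. Teardown in
 * xfrm_net_exit runs in the exact reverse order of initialization, as do
 * the error-unwind labels in xfrm_net_init.
 */
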
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);

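/* Both hooks emit one audit record per SPD operation. The resulting line
 * carries the op name ("SPD-add"/"SPD-delete"), the caller's credentials,
 * a res= success flag, any attached security context, and the selector
 * addresses, with prefix lengths logged only when narrower than a full
 * host match (32 for IPv4, 128 for IPv6).
 */
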
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
				    m[i].old_family) &&
		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
				    m[i].old_family))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
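
/* xfrm_migrate() is the key-manager entry point used for MIPv6 and
 * MOBIKE-style endpoint updates: after validating the request it walks
 * the five stages above so that the policy's templates and every matching
 * state move to the new addresses, with restore_state unwinding any
 * partially-created new states (and releasing the policy reference) if a
 * stage fails part-way through.
 */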