// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"
#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};
/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};
/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 * |
 * +---- root_d: sorted by daddr:prefix
 * |            |
 * |       xfrm_pol_inexact_node
 * |            |
 * |            +- root: sorted by saddr/prefix
 * |            |        |
 * |            |   xfrm_pol_inexact_node
 * |            |        |
 * |            |        + hhead: saddr:daddr policies
 * |            |
 * |            +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |            |
 * |       xfrm_pol_inexact_node
 * |            |
 * |            + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */
struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};
static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}
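
/* Selector matching: check a flow's addresses, ports, protocol and
 * interface against a policy selector.  The IPv4 and IPv6 helpers below
 * differ only in how the addresses are compared.
 */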
static bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
static bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
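
/* Per-policy lifetime timer: check the hard/soft add- and use-time
 * expirations, notify the key manager, and re-arm the timer for the
 * next pending deadline.
 */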
static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
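
/* Policy hash tables: policies are hashed by selector (bydst) and by
 * index (byidx).  The helpers below compute table slots and the
 * per-direction prefix-length thresholds used by the bydst hash.
 */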
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
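
/* Resizing doubles the table; entries are rehashed into the new table
 * under xfrm_policy_lock while readers are protected by RCU and the
 * hash-generation seqcount.
 */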
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}
/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_init(&bin->count);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}
static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}
static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}
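
/* Order two addresses under a prefix length: the result is zero when
 * both fall into the same prefixlen-sized block, otherwise its sign
 * gives the ordering used by the inexact rb-trees.
 */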
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (sizeof(long) == 4 && prefixlen == 0)
			return ntohl(a->a4) - ntohl(b->a4);
		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			u32 mask = ~0u << (32 - pbi);

			delta = (ntohl(a->a6[pdw]) & mask) -
				(ntohl(b->a6[pdw]) & mask);
		}
		break;
	default:
		break;
	}

	return delta;
}
static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		else
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}
static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}
/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}
static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}
static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}
static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}
static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}
static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}
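
/* Worker for policy_hthresh updates: re-read the prefix-length
 * thresholds, move policies that no longer qualify for the bydst hash
 * into the inexact lists/trees, and rehash the rest.
 */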
static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}
static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}
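
/* rhashtable callbacks for the inexact-bin table; bins are keyed by
 * (net, if_id, family, dir, type).
 */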
static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}
static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
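
/* Lookup (and optionally delete) a policy by selector/security context
 * or by index.  The bysel variant uses the bydst hash chain when the
 * selector is hashable, otherwise the inexact candidate lists.
 */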
static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}
struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);
	return ret;
}
static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}
static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}
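
/* Scan one inexact candidate list and return the best match: lower
 * priority wins, and on a priority tie the policy with the smaller
 * ->pos is preferred.
 */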
static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
			      struct xfrm_policy *prefer,
			      const struct flowi *fl,
			      u8 type, u16 family, int dir, u32 if_id)
{
	u32 priority = prefer ? prefer->priority : ~0u;
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry_rcu(pol, chain, bydst) {
		int err;

		if (pol->priority > priority)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err != -ESRCH)
				return ERR_PTR(err);

			continue;
		}

		if (prefer) {
			/* matches.  Is it older than *prefer? */
			if (pol->priority == priority &&
			    prefer->pos < pol->pos)
				continue;
		}

		return pol;
	}

	return NULL;
}
static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
			    struct xfrm_policy *prefer,
			    const struct flowi *fl,
			    u8 type, u16 family, int dir, u32 if_id)
{
	struct xfrm_policy *tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
		tmp = __xfrm_policy_eval_candidates(cand->res[i],
						    prefer,
						    fl, type, family, dir,
						    if_id);
		if (!tmp)
			continue;

		if (IS_ERR(tmp))
			return tmp;
		prefer = tmp;
	}

	return prefer;
}
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir,
						     u32 if_id)
{
	struct xfrm_pol_inexact_candidates cand;
	const xfrm_address_t *daddr, *saddr;
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	unsigned int sequence;
	int err;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
 retry:
	do {
		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));

	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
							 daddr))
		goto skip_inexact;

	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
					  family, dir, if_id);
	if (pol) {
		ret = pol;
		if (IS_ERR(pol))
			goto fail;
	}

skip_inexact:
	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
		goto retry;

	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
}
static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
					      const struct flowi *fl,
					      u16 family, u8 dir, u32 if_id)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
					dir, if_id);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
					 dir, if_id);
}
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
						 const struct flowi *fl,
						 u16 family, u32 if_id)
{
	struct xfrm_policy *pol;

	rcu_read_lock();
 again:
	pol = rcu_dereference(sk->sk_policy[dir]);
	if (pol != NULL) {
		bool match;
		int err = 0;

		if (pol->family != family) {
			pol = NULL;
			goto out;
		}

		match = xfrm_selector_match(&pol->selector, fl, family);
		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
			    pol->if_id != if_id) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      dir);
			if (!err) {
				if (!xfrm_pol_hold_rcu(pol))
					goto again;
			} else if (err == -ESRCH) {
				pol = NULL;
			} else {
				pol = ERR_PTR(err);
			}
		} else
			pol = NULL;
	}
out:
	rcu_read_unlock();
	return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (list_empty(&pol->walk.all))
		return NULL;

	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del_rcu(&pol->bydst);
		hlist_del_init(&pol->bydst_inexact_list);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = sock_net(sk);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
	if (pol) {
		pol->curlft.add_time = ktime_get_real_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
		xfrm_sk_policy_link(pol, dir);
	}
	rcu_assign_pointer(sk->sk_policy[dir], pol);
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		xfrm_sk_policy_unlink(old_pol, dir);
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (old_pol)
		xfrm_policy_kill(old_pol);
	return 0;
}
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
	struct net *net = xp_net(old);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->if_id = old->if_id;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		newp->family = old->family;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	}
	return newp;
}
int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
}
static int
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
	       xfrm_address_t *remote, unsigned short family, u32 mark)
{
	int err;
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, oif, local, remote, mark);
	rcu_read_unlock();
	return err;
}
/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, fl->flowi_oif,
						       &tmp, remote,
						       tmpl->encap_family, 0);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
				    family, policy->if_id);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH) {
			error = -EAGAIN;
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

 fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}
static int xfrm_get_tos(const struct flowi *fl, int family)
{
	if (family == AF_INET)
		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;

	return 0;
}
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
	} else
		xdst = ERR_PTR(-ENOBUFS);

	rcu_read_unlock();

	return xdst;
}
static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	if (dst->ops->family == AF_INET6) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		path->path_cookie = rt6_get_cookie(rt);
		path->u.rt6.rt6i_nfheader_len = nfheader_len;
	}
}
static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	const struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	rcu_read_unlock();

	return err;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	const struct xfrm_state_afinfo *afinfo;
	const struct xfrm_mode *inner_mode;
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;
	int nfheader_len = 0;
	int trailer_len = 0;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);

		/* Ref count is taken during xfrm_alloc_dst()
		 * No need to do dst_clone() on dst1
		 */
		xfrm_dst_set_child(xdst_prev, &xdst->u.dst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode)
				err = -EAFNOSUPPORT;
		} else
			inner_mode = &xfrm[i]->inner_mode;

		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);

			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
					      &saddr, &daddr, family, mark);
		}

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;

		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
		if (afinfo)
			dst1->output = afinfo->output;
		else
			dst1->output = dst_discard_out;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	xfrm_dst_set_child(xdst_prev, dst);

	xfrm_init_path(xdst0, dst, nfheader_len);
	xfrm_init_pmtu(bundle, nx);

	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);

		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
	}

	return &xdst0->u.dst;

	xfrm_state_put(xfrm[i]);
	dst_release_immediate(&xdst0->u.dst);
	return ERR_PTR(err);
}
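
/* Orientation note (illustrative sketch only, not part of the original code
 * flow): on success xfrm_bundle_create() hands back the head of a chain of
 * xfrm_dst entries, one per resolved xfrm_state, with the plain inet route at
 * the tail, roughly:
 *
 *	xdst0 --> xdst1 --> ... --> inet/inet6 route (xfrm_dst_path())
 *
 * header_len and trailer_len accumulate the per-state encapsulation overhead
 * and are then distributed back down the chain, so each level advertises the
 * space the states below it still need.
 */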
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
		if (IS_ERR(pols[1])) {
			xfrm_pols_put(pols, *num_pols);
			return PTR_ERR(pols[1]);
		}
		(*num_xfrms) += pols[1]->xfrm_nr;
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct xfrm_dst *xdst;
	struct dst_entry *dst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err == 0)
			return NULL;

		if (err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
static void xfrm_policy_queue_process(struct timer_list *t)
{
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}

	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(xfrm_dst_path(dst));
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);

	if (dst->flags & DST_XFRM_QUEUE) {
		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);

		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
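
/* Illustration of the hold-queue timing used above: XFRM_QUEUE_TMO_MIN is
 * HZ/10 (about 100ms) and XFRM_QUEUE_TMO_MAX is 60*HZ (60 seconds). While the
 * bundle is still a dummy one, xfrm_policy_queue_process() doubles
 * pq->timeout on each run, i.e. roughly 100ms, 200ms, 400ms, ... until the
 * 60 second cap is reached, at which point the held packets are purged.
 */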
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms, u16 family)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	xfrm_dst_set_child(xdst, dst);

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = xfrm_fill_dst(xdst, dev, fl);

	xdst = ERR_PTR(err);
	return xdst;
}
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					   const struct flowi *fl,
					   u16 family, u8 dir,
					   struct xfrm_flo *xflo, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return ERR_PTR(err);
		}
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build the template (no xfrm_states). */
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	}

	ret = afinfo->blackhole_route(net, dst_orig);

	return ret;
}
/* Finds/creates a bundle for given flow and if_id
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);

		if (num_xfrms <= 0) {
			drop_pols = num_pols;
			goto no_transform;
		}

		xdst = xfrm_resolve_and_create_bundle(
				pols, num_pols, fl, family, dst_orig);
		if (IS_ERR(xdst)) {
			xfrm_pols_put(pols, num_pols);
			err = PTR_ERR(xdst);
			if (err == -EREMOTE)
				goto nopol;
			goto dropdst;
		} else if (xdst == NULL) {
			num_xfrms = 0;
			drop_pols = num_pols;
			goto no_transform;
		}

		route = xdst->route;
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;

		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
			goto dropdst;
		}

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = ktime_get_real_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup_with_ifid);
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags)
{
	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
}
EXPORT_SYMBOL(xfrm_lookup);
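
/* Typical caller pattern (illustrative sketch only; the routing helpers named
 * here belong to the IPv4 output path, not to this file): the caller resolves
 * a plain route first and then lets xfrm_lookup() swap it for an IPsec bundle
 * when a matching output policy exists, e.g. roughly:
 *
 *	rt = ip_route_output_key_hash(net, fl4, skb);
 *	dst = xfrm_lookup(net, &rt->dst, flowi4_to_flowi(fl4), sk, 0);
 *	if (IS_ERR(dst))
 *		return ERR_CAST(dst);	// policy blocked, or no SA yet
 */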
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
				    const struct sock *sk, int flags)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);

	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

	if (IS_ERR(dst))
		dst_release(dst_orig);

	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct sec_path *sp = skb_sec_path(skb);
	struct xfrm_state *x;

	if (!sp || idx < 0 || idx >= sp->len)
		return 0;
	x = sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of an optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
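
/* Example of the return convention above (illustrative): a template that
 * matches the state at secpath index 0 makes the function return 1 (the next
 * index to resume from); an optional transport-mode template is simply
 * bypassed and returns "start" unchanged; and a non-transport secpath state
 * that fails to match at index 1, with no earlier match, yields
 * -2 - 1 = -3, which __xfrm_policy_check() later decodes back into the
 * errored index for xfrm_secpath_reject().
 */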
static void
decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl;
	u8 *xprth = skb_network_header(skb) + ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;

	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + ihl * 4;
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ehdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;
		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ah_hdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;
		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
}
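
/* Worked example (illustrative): for a forward-direction TCP packet
 * 10.0.0.1:12345 -> 192.0.2.1:443, decode_session4() leaves
 * fl4->saddr = 10.0.0.1, fl4->daddr = 192.0.2.1,
 * fl4->flowi4_proto = IPPROTO_TCP, fl4->fl4_sport = 12345 and
 * fl4->fl4_dport = 443; with reverse == true the addresses and ports are
 * swapped so the flow can be matched against the opposite-direction policy.
 */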
#if IS_ENABLED(CONFIG_IPV6)
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u32 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	int onlyproto = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	while (nh + offset + sizeof(*exthdr) < skb->data ||
	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			onlyproto = 1;
			/* fall through */
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
			break;

		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;

		case IPPROTO_ICMPV6:
			if (!onlyproto && (nh + offset + 2 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && (nh + offset + 3 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif

		/* XXX Why are there these headers? */
		case IPPROTO_AH:
		case IPPROTO_ESP:
		case IPPROTO_COMP:
		default:
			fl6->fl6_ipsec_spi = 0;
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
#endif
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
		return -EAFNOSUPPORT;
	}

	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	int xerr_idx = -1;
	const struct xfrm_if_cb *ifcb;
	struct sec_path *sp;
	struct xfrm_if *xi;
	u32 if_id = 0;

	ifcb = xfrm_if_get_cb();
	if (ifcb) {
		xi = ifcb->decode_session(skb, family);
		if (xi)
			if_id = xi->p.if_id;
	}

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	sp = skb_sec_path(skb);
	if (sp) {
		for (i = sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = sp->xvec[i];

			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = ktime_get_real_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN, if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = ktime_get_real_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		sp = skb_sec_path(skb);
		if (!sp)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use. We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them. It
	 * is just too much work. Instead we make the checks here on
	 * every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
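
/* Walking through the "XFRM dst A --> IPv4 dst X" example above (sketch):
 * A was created with dst->obsolete = DST_OBSOLETE_FORCE_CHK (a negative
 * value), so every dst_check() on A lands in xfrm_dst_check(). If X has
 * meanwhile been obsoleted, xfrm_bundle_ok() notices it via
 * dst_check(xdst->route, xdst->route_cookie), stale_bundle() reports the
 * bundle as stale, and the caller drops A and re-looks up the route.
 */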
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst->obsolete) {
		dst_release(dst);
		dst = NULL;
	}

	return dst;
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}
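
/* Numeric illustration (assumed values, not taken from the code): with a
 * route MTU of 1500 and a single tunnel-mode ESP state whose overhead makes
 * xfrm_state_mtu() report 1438, the loop above caches child_mtu_cached = 1500
 * and route_mtu_cached = 1500 and sets RTAX_MTU of the xfrm dst to 1438, so
 * the path MTU seen by the stack already accounts for the encapsulation.
 */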
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
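
/* Registration sketch (illustrative, loosely modelled on the per-family
 * callers such as the IPv4 side; the identifiers below are assumptions, not
 * copied from them):
 *
 *	static const struct xfrm_policy_afinfo example_afinfo = {
 *		.dst_ops	 = &example_dst_ops,
 *		.dst_lookup	 = example_dst_lookup,
 *		.fill_dst	 = example_fill_dst,
 *		.blackhole_route = example_blackhole_route,
 *	};
 *
 *	xfrm_policy_register_afinfo(&example_afinfo, AF_INET);
 *
 * Any dst_ops hooks the caller leaves NULL are filled in above with the
 * generic xfrm_* implementations from this file.
 */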
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
						   sizeof(struct xfrm_dst),
						   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						   NULL);

		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};
void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	seqcount_init(&xfrm_policy_hash_generation);

	RCU_INIT_POINTER(xfrm_if_cb, NULL);
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);