// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"
#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48
struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};
/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */
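/* Example (illustrative only, not part of the original comment): with the
 * policies any -> any, any -> 10.0.0.0/8 and 192.168.0.0/16 -> 10.0.0.0/8
 * in one bin, a lookup for 192.168.1.1 -> 10.1.1.1 collects the any:any
 * list, the 10.0.0.0/8 node's any:daddr list and that node's saddr:daddr
 * subtree list, then picks the lowest-priority (oldest on tie) match.
 */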
struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};
enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};
static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);
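/* Take a reference on a policy found during an RCU walk; fails (returns
 * false) once the refcount has already dropped to zero, i.e. the policy
 * is on its way to being freed and must not be reused.
 */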
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}
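/* A selector matches a flow when both address prefixes, the (masked)
 * ports, the protocol and the output interface all agree; a zero proto
 * or ifindex in the selector acts as a wildcard.
 */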
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}
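/* Route lookup helper: dispatches to the address-family specific
 * dst_lookup() hook of the registered xfrm_policy_afinfo.
 */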
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}
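/* Convert a relative timeout in seconds to jiffies, clamped so it never
 * exceeds the scheduler's maximum timeout.
 */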
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
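/* Per-policy timer: checks the hard and soft add/use lifetimes, signals
 * the key manager on soft expiry, re-arms the timer for the next event,
 * and deletes the policy outright once a hard limit is hit.
 */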
static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from all lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
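/* Upper bound for the by-destination and by-index policy hash tables;
 * resizing keeps doubling a table until it reaches this many buckets.
 */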
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
456 static void __get_hash_thresh(struct net
*net
,
457 unsigned short family
, int dir
,
458 u8
*dbits
, u8
*sbits
)
462 *dbits
= net
->xfrm
.policy_bydst
[dir
].dbits4
;
463 *sbits
= net
->xfrm
.policy_bydst
[dir
].sbits4
;
467 *dbits
= net
->xfrm
.policy_bydst
[dir
].dbits6
;
468 *sbits
= net
->xfrm
.policy_bydst
[dir
].sbits6
;
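/* policy_hash_bysel() relies on __sel_hash() flagging selectors whose
 * prefixes are below the per-direction thresholds by returning hmask + 1;
 * such policies are too coarse for the exact hash table and are handled
 * by the inexact lists/trees instead.
 */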
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
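/* Re-hash every policy on one chain into the new by-destination table,
 * keeping the relative order of entries that land in the same new bucket.
 */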
513 static void xfrm_dst_hash_transfer(struct net
*net
,
514 struct hlist_head
*list
,
515 struct hlist_head
*ndsttable
,
516 unsigned int nhashmask
,
519 struct hlist_node
*tmp
, *entry0
= NULL
;
520 struct xfrm_policy
*pol
;
526 hlist_for_each_entry_safe(pol
, tmp
, list
, bydst
) {
529 __get_hash_thresh(net
, pol
->family
, dir
, &dbits
, &sbits
);
530 h
= __addr_hash(&pol
->selector
.daddr
, &pol
->selector
.saddr
,
531 pol
->family
, nhashmask
, dbits
, sbits
);
533 hlist_del_rcu(&pol
->bydst
);
534 hlist_add_head_rcu(&pol
->bydst
, ndsttable
+ h
);
539 hlist_del_rcu(&pol
->bydst
);
540 hlist_add_behind_rcu(&pol
->bydst
, entry0
);
542 entry0
= &pol
->bydst
;
544 if (!hlist_empty(list
)) {
550 static void xfrm_idx_hash_transfer(struct hlist_head
*list
,
551 struct hlist_head
*nidxtable
,
552 unsigned int nhashmask
)
554 struct hlist_node
*tmp
;
555 struct xfrm_policy
*pol
;
557 hlist_for_each_entry_safe(pol
, tmp
, list
, byidx
) {
560 h
= __idx_hash(pol
->index
, nhashmask
);
561 hlist_add_head(&pol
->byidx
, nidxtable
+h
);
565 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask
)
567 return ((old_hmask
+ 1) << 1) - 1;
570 static void xfrm_bydst_resize(struct net
*net
, int dir
)
572 unsigned int hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
573 unsigned int nhashmask
= xfrm_new_hash_mask(hmask
);
574 unsigned int nsize
= (nhashmask
+ 1) * sizeof(struct hlist_head
);
575 struct hlist_head
*ndst
= xfrm_hash_alloc(nsize
);
576 struct hlist_head
*odst
;
582 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
583 write_seqcount_begin(&xfrm_policy_hash_generation
);
585 odst
= rcu_dereference_protected(net
->xfrm
.policy_bydst
[dir
].table
,
586 lockdep_is_held(&net
->xfrm
.xfrm_policy_lock
));
588 odst
= rcu_dereference_protected(net
->xfrm
.policy_bydst
[dir
].table
,
589 lockdep_is_held(&net
->xfrm
.xfrm_policy_lock
));
591 for (i
= hmask
; i
>= 0; i
--)
592 xfrm_dst_hash_transfer(net
, odst
+ i
, ndst
, nhashmask
, dir
);
594 rcu_assign_pointer(net
->xfrm
.policy_bydst
[dir
].table
, ndst
);
595 net
->xfrm
.policy_bydst
[dir
].hmask
= nhashmask
;
597 write_seqcount_end(&xfrm_policy_hash_generation
);
598 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
602 xfrm_hash_free(odst
, (hmask
+ 1) * sizeof(struct hlist_head
));
605 static void xfrm_byidx_resize(struct net
*net
, int total
)
607 unsigned int hmask
= net
->xfrm
.policy_idx_hmask
;
608 unsigned int nhashmask
= xfrm_new_hash_mask(hmask
);
609 unsigned int nsize
= (nhashmask
+ 1) * sizeof(struct hlist_head
);
610 struct hlist_head
*oidx
= net
->xfrm
.policy_byidx
;
611 struct hlist_head
*nidx
= xfrm_hash_alloc(nsize
);
617 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
619 for (i
= hmask
; i
>= 0; i
--)
620 xfrm_idx_hash_transfer(oidx
+ i
, nidx
, nhashmask
);
622 net
->xfrm
.policy_byidx
= nidx
;
623 net
->xfrm
.policy_idx_hmask
= nhashmask
;
625 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
627 xfrm_hash_free(oidx
, (hmask
+ 1) * sizeof(struct hlist_head
));
630 static inline int xfrm_bydst_should_resize(struct net
*net
, int dir
, int *total
)
632 unsigned int cnt
= net
->xfrm
.policy_count
[dir
];
633 unsigned int hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
638 if ((hmask
+ 1) < xfrm_policy_hashmax
&&
645 static inline int xfrm_byidx_should_resize(struct net
*net
, int total
)
647 unsigned int hmask
= net
->xfrm
.policy_idx_hmask
;
649 if ((hmask
+ 1) < xfrm_policy_hashmax
&&
656 void xfrm_spd_getinfo(struct net
*net
, struct xfrmk_spdinfo
*si
)
658 si
->incnt
= net
->xfrm
.policy_count
[XFRM_POLICY_IN
];
659 si
->outcnt
= net
->xfrm
.policy_count
[XFRM_POLICY_OUT
];
660 si
->fwdcnt
= net
->xfrm
.policy_count
[XFRM_POLICY_FWD
];
661 si
->inscnt
= net
->xfrm
.policy_count
[XFRM_POLICY_IN
+XFRM_POLICY_MAX
];
662 si
->outscnt
= net
->xfrm
.policy_count
[XFRM_POLICY_OUT
+XFRM_POLICY_MAX
];
663 si
->fwdscnt
= net
->xfrm
.policy_count
[XFRM_POLICY_FWD
+XFRM_POLICY_MAX
];
664 si
->spdhcnt
= net
->xfrm
.policy_idx_hmask
;
665 si
->spdhmcnt
= xfrm_policy_hashmax
;
667 EXPORT_SYMBOL(xfrm_spd_getinfo
);
669 static DEFINE_MUTEX(hash_resize_mutex
);
670 static void xfrm_hash_resize(struct work_struct
*work
)
672 struct net
*net
= container_of(work
, struct net
, xfrm
.policy_hash_work
);
675 mutex_lock(&hash_resize_mutex
);
678 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
679 if (xfrm_bydst_should_resize(net
, dir
, &total
))
680 xfrm_bydst_resize(net
, dir
);
682 if (xfrm_byidx_should_resize(net
, total
))
683 xfrm_byidx_resize(net
, total
);
685 mutex_unlock(&hash_resize_mutex
);
688 /* Make sure *pol can be inserted into fastbin.
689 * Useful to check that later insert requests will be sucessful
690 * (provided xfrm_policy_lock is held throughout).
692 static struct xfrm_pol_inexact_bin
*
693 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy
*pol
, u8 dir
)
695 struct xfrm_pol_inexact_bin
*bin
, *prev
;
696 struct xfrm_pol_inexact_key k
= {
697 .family
= pol
->family
,
702 struct net
*net
= xp_net(pol
);
704 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
706 write_pnet(&k
.net
, net
);
707 bin
= rhashtable_lookup_fast(&xfrm_policy_inexact_table
, &k
,
708 xfrm_pol_inexact_params
);
712 bin
= kzalloc(sizeof(*bin
), GFP_ATOMIC
);
717 INIT_HLIST_HEAD(&bin
->hhead
);
718 bin
->root_d
= RB_ROOT
;
719 bin
->root_s
= RB_ROOT
;
720 seqcount_init(&bin
->count
);
722 prev
= rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table
,
724 xfrm_pol_inexact_params
);
726 list_add(&bin
->inexact_bins
, &net
->xfrm
.inexact_bins
);
732 return IS_ERR(prev
) ? NULL
: prev
;
735 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t
*addr
,
736 int family
, u8 prefixlen
)
738 if (xfrm_addr_any(addr
, family
))
741 if (family
== AF_INET6
&& prefixlen
< INEXACT_PREFIXLEN_IPV6
)
744 if (family
== AF_INET
&& prefixlen
< INEXACT_PREFIXLEN_IPV4
)
751 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy
*policy
)
753 const xfrm_address_t
*addr
;
754 bool saddr_any
, daddr_any
;
757 addr
= &policy
->selector
.saddr
;
758 prefixlen
= policy
->selector
.prefixlen_s
;
760 saddr_any
= xfrm_pol_inexact_addr_use_any_list(addr
,
763 addr
= &policy
->selector
.daddr
;
764 prefixlen
= policy
->selector
.prefixlen_d
;
765 daddr_any
= xfrm_pol_inexact_addr_use_any_list(addr
,
768 return saddr_any
&& daddr_any
;
771 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node
*node
,
772 const xfrm_address_t
*addr
, u8 prefixlen
)
775 node
->prefixlen
= prefixlen
;
778 static struct xfrm_pol_inexact_node
*
779 xfrm_pol_inexact_node_alloc(const xfrm_address_t
*addr
, u8 prefixlen
)
781 struct xfrm_pol_inexact_node
*node
;
783 node
= kzalloc(sizeof(*node
), GFP_ATOMIC
);
785 xfrm_pol_inexact_node_init(node
, addr
, prefixlen
);
790 static int xfrm_policy_addr_delta(const xfrm_address_t
*a
,
791 const xfrm_address_t
*b
,
792 u8 prefixlen
, u16 family
)
794 unsigned int pdw
, pbi
;
799 if (sizeof(long) == 4 && prefixlen
== 0)
800 return ntohl(a
->a4
) - ntohl(b
->a4
);
801 return (ntohl(a
->a4
) & ((~0UL << (32 - prefixlen
)))) -
802 (ntohl(b
->a4
) & ((~0UL << (32 - prefixlen
))));
804 pdw
= prefixlen
>> 5;
805 pbi
= prefixlen
& 0x1f;
808 delta
= memcmp(a
->a6
, b
->a6
, pdw
<< 2);
813 u32 mask
= ~0u << (32 - pbi
);
815 delta
= (ntohl(a
->a6
[pdw
]) & mask
) -
816 (ntohl(b
->a6
[pdw
]) & mask
);
826 static void xfrm_policy_inexact_list_reinsert(struct net
*net
,
827 struct xfrm_pol_inexact_node
*n
,
830 unsigned int matched_s
, matched_d
;
831 struct xfrm_policy
*policy
, *p
;
836 list_for_each_entry_reverse(policy
, &net
->xfrm
.policy_all
, walk
.all
) {
837 struct hlist_node
*newpos
= NULL
;
838 bool matches_s
, matches_d
;
840 if (!policy
->bydst_reinsert
)
843 WARN_ON_ONCE(policy
->family
!= family
);
845 policy
->bydst_reinsert
= false;
846 hlist_for_each_entry(p
, &n
->hhead
, bydst
) {
847 if (policy
->priority
> p
->priority
)
849 else if (policy
->priority
== p
->priority
&&
850 policy
->pos
> p
->pos
)
857 hlist_add_behind_rcu(&policy
->bydst
, newpos
);
859 hlist_add_head_rcu(&policy
->bydst
, &n
->hhead
);
861 /* paranoia checks follow.
862 * Check that the reinserted policy matches at least
863 * saddr or daddr for current node prefix.
865 * Matching both is fine, matching saddr in one policy
866 * (but not daddr) and then matching only daddr in another
869 matches_s
= xfrm_policy_addr_delta(&policy
->selector
.saddr
,
873 matches_d
= xfrm_policy_addr_delta(&policy
->selector
.daddr
,
877 if (matches_s
&& matches_d
)
880 WARN_ON_ONCE(!matches_s
&& !matches_d
);
885 WARN_ON_ONCE(matched_s
&& matched_d
);
889 static void xfrm_policy_inexact_node_reinsert(struct net
*net
,
890 struct xfrm_pol_inexact_node
*n
,
894 struct xfrm_pol_inexact_node
*node
;
895 struct rb_node
**p
, *parent
;
897 /* we should not have another subtree here */
898 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n
->root
));
907 node
= rb_entry(*p
, struct xfrm_pol_inexact_node
, node
);
909 prefixlen
= min(node
->prefixlen
, n
->prefixlen
);
911 delta
= xfrm_policy_addr_delta(&n
->addr
, &node
->addr
,
914 p
= &parent
->rb_left
;
915 } else if (delta
> 0) {
916 p
= &parent
->rb_right
;
918 struct xfrm_policy
*tmp
;
920 hlist_for_each_entry(tmp
, &n
->hhead
, bydst
) {
921 tmp
->bydst_reinsert
= true;
922 hlist_del_rcu(&tmp
->bydst
);
925 xfrm_policy_inexact_list_reinsert(net
, node
, family
);
927 if (node
->prefixlen
== n
->prefixlen
) {
935 n
->prefixlen
= prefixlen
;
940 rb_link_node_rcu(&n
->node
, parent
, p
);
941 rb_insert_color(&n
->node
, new);
944 /* merge nodes v and n */
945 static void xfrm_policy_inexact_node_merge(struct net
*net
,
946 struct xfrm_pol_inexact_node
*v
,
947 struct xfrm_pol_inexact_node
*n
,
950 struct xfrm_pol_inexact_node
*node
;
951 struct xfrm_policy
*tmp
;
952 struct rb_node
*rnode
;
954 /* To-be-merged node v has a subtree.
956 * Dismantle it and insert its nodes to n->root.
958 while ((rnode
= rb_first(&v
->root
)) != NULL
) {
959 node
= rb_entry(rnode
, struct xfrm_pol_inexact_node
, node
);
960 rb_erase(&node
->node
, &v
->root
);
961 xfrm_policy_inexact_node_reinsert(net
, node
, &n
->root
,
965 hlist_for_each_entry(tmp
, &v
->hhead
, bydst
) {
966 tmp
->bydst_reinsert
= true;
967 hlist_del_rcu(&tmp
->bydst
);
970 xfrm_policy_inexact_list_reinsert(net
, n
, family
);
973 static struct xfrm_pol_inexact_node
*
974 xfrm_policy_inexact_insert_node(struct net
*net
,
975 struct rb_root
*root
,
976 xfrm_address_t
*addr
,
977 u16 family
, u8 prefixlen
, u8 dir
)
979 struct xfrm_pol_inexact_node
*cached
= NULL
;
980 struct rb_node
**p
, *parent
= NULL
;
981 struct xfrm_pol_inexact_node
*node
;
988 node
= rb_entry(*p
, struct xfrm_pol_inexact_node
, node
);
990 delta
= xfrm_policy_addr_delta(addr
, &node
->addr
,
993 if (delta
== 0 && prefixlen
>= node
->prefixlen
) {
994 WARN_ON_ONCE(cached
); /* ipsec policies got lost */
999 p
= &parent
->rb_left
;
1001 p
= &parent
->rb_right
;
1003 if (prefixlen
< node
->prefixlen
) {
1004 delta
= xfrm_policy_addr_delta(addr
, &node
->addr
,
1010 /* This node is a subnet of the new prefix. It needs
1011 * to be removed and re-inserted with the smaller
1012 * prefix and all nodes that are now also covered
1013 * by the reduced prefixlen.
1015 rb_erase(&node
->node
, root
);
1018 xfrm_pol_inexact_node_init(node
, addr
,
1022 /* This node also falls within the new
1023 * prefixlen. Merge the to-be-reinserted
1024 * node and this one.
1026 xfrm_policy_inexact_node_merge(net
, node
,
1028 kfree_rcu(node
, rcu
);
1039 node
= xfrm_pol_inexact_node_alloc(addr
, prefixlen
);
1044 rb_link_node_rcu(&node
->node
, parent
, p
);
1045 rb_insert_color(&node
->node
, root
);
1050 static void xfrm_policy_inexact_gc_tree(struct rb_root
*r
, bool rm
)
1052 struct xfrm_pol_inexact_node
*node
;
1053 struct rb_node
*rn
= rb_first(r
);
1056 node
= rb_entry(rn
, struct xfrm_pol_inexact_node
, node
);
1058 xfrm_policy_inexact_gc_tree(&node
->root
, rm
);
1061 if (!hlist_empty(&node
->hhead
) || !RB_EMPTY_ROOT(&node
->root
)) {
1066 rb_erase(&node
->node
, r
);
1067 kfree_rcu(node
, rcu
);
1071 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin
*b
, bool net_exit
)
1073 write_seqcount_begin(&b
->count
);
1074 xfrm_policy_inexact_gc_tree(&b
->root_d
, net_exit
);
1075 xfrm_policy_inexact_gc_tree(&b
->root_s
, net_exit
);
1076 write_seqcount_end(&b
->count
);
1078 if (!RB_EMPTY_ROOT(&b
->root_d
) || !RB_EMPTY_ROOT(&b
->root_s
) ||
1079 !hlist_empty(&b
->hhead
)) {
1080 WARN_ON_ONCE(net_exit
);
1084 if (rhashtable_remove_fast(&xfrm_policy_inexact_table
, &b
->head
,
1085 xfrm_pol_inexact_params
) == 0) {
1086 list_del(&b
->inexact_bins
);
1091 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin
*b
)
1093 struct net
*net
= read_pnet(&b
->k
.net
);
1095 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1096 __xfrm_policy_inexact_prune_bin(b
, false);
1097 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1100 static void __xfrm_policy_inexact_flush(struct net
*net
)
1102 struct xfrm_pol_inexact_bin
*bin
, *t
;
1104 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
1106 list_for_each_entry_safe(bin
, t
, &net
->xfrm
.inexact_bins
, inexact_bins
)
1107 __xfrm_policy_inexact_prune_bin(bin
, false);
1110 static struct hlist_head
*
1111 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin
*bin
,
1112 struct xfrm_policy
*policy
, u8 dir
)
1114 struct xfrm_pol_inexact_node
*n
;
1117 net
= xp_net(policy
);
1118 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
1120 if (xfrm_policy_inexact_insert_use_any_list(policy
))
1123 if (xfrm_pol_inexact_addr_use_any_list(&policy
->selector
.daddr
,
1125 policy
->selector
.prefixlen_d
)) {
1126 write_seqcount_begin(&bin
->count
);
1127 n
= xfrm_policy_inexact_insert_node(net
,
1129 &policy
->selector
.saddr
,
1131 policy
->selector
.prefixlen_s
,
1133 write_seqcount_end(&bin
->count
);
1140 /* daddr is fixed */
1141 write_seqcount_begin(&bin
->count
);
1142 n
= xfrm_policy_inexact_insert_node(net
,
1144 &policy
->selector
.daddr
,
1146 policy
->selector
.prefixlen_d
, dir
);
1147 write_seqcount_end(&bin
->count
);
1151 /* saddr is wildcard */
1152 if (xfrm_pol_inexact_addr_use_any_list(&policy
->selector
.saddr
,
1154 policy
->selector
.prefixlen_s
))
1157 write_seqcount_begin(&bin
->count
);
1158 n
= xfrm_policy_inexact_insert_node(net
,
1160 &policy
->selector
.saddr
,
1162 policy
->selector
.prefixlen_s
, dir
);
1163 write_seqcount_end(&bin
->count
);
1170 static struct xfrm_policy
*
1171 xfrm_policy_inexact_insert(struct xfrm_policy
*policy
, u8 dir
, int excl
)
1173 struct xfrm_pol_inexact_bin
*bin
;
1174 struct xfrm_policy
*delpol
;
1175 struct hlist_head
*chain
;
1178 bin
= xfrm_policy_inexact_alloc_bin(policy
, dir
);
1180 return ERR_PTR(-ENOMEM
);
1182 net
= xp_net(policy
);
1183 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
1185 chain
= xfrm_policy_inexact_alloc_chain(bin
, policy
, dir
);
1187 __xfrm_policy_inexact_prune_bin(bin
, false);
1188 return ERR_PTR(-ENOMEM
);
1191 delpol
= xfrm_policy_insert_list(chain
, policy
, excl
);
1192 if (delpol
&& excl
) {
1193 __xfrm_policy_inexact_prune_bin(bin
, false);
1194 return ERR_PTR(-EEXIST
);
1197 chain
= &net
->xfrm
.policy_inexact
[dir
];
1198 xfrm_policy_insert_inexact_list(chain
, policy
);
1201 __xfrm_policy_inexact_prune_bin(bin
, false);
1206 static void xfrm_hash_rebuild(struct work_struct
*work
)
1208 struct net
*net
= container_of(work
, struct net
,
1209 xfrm
.policy_hthresh
.work
);
1211 struct xfrm_policy
*pol
;
1212 struct xfrm_policy
*policy
;
1213 struct hlist_head
*chain
;
1214 struct hlist_head
*odst
;
1215 struct hlist_node
*newpos
;
1219 u8 lbits4
, rbits4
, lbits6
, rbits6
;
1221 mutex_lock(&hash_resize_mutex
);
1223 /* read selector prefixlen thresholds */
1225 seq
= read_seqbegin(&net
->xfrm
.policy_hthresh
.lock
);
1227 lbits4
= net
->xfrm
.policy_hthresh
.lbits4
;
1228 rbits4
= net
->xfrm
.policy_hthresh
.rbits4
;
1229 lbits6
= net
->xfrm
.policy_hthresh
.lbits6
;
1230 rbits6
= net
->xfrm
.policy_hthresh
.rbits6
;
1231 } while (read_seqretry(&net
->xfrm
.policy_hthresh
.lock
, seq
));
1233 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1234 write_seqcount_begin(&xfrm_policy_hash_generation
);
1236 /* make sure that we can insert the indirect policies again before
1237 * we start with destructive action.
1239 list_for_each_entry(policy
, &net
->xfrm
.policy_all
, walk
.all
) {
1240 struct xfrm_pol_inexact_bin
*bin
;
1243 dir
= xfrm_policy_id2dir(policy
->index
);
1244 if (policy
->walk
.dead
|| dir
>= XFRM_POLICY_MAX
)
1247 if ((dir
& XFRM_POLICY_MASK
) == XFRM_POLICY_OUT
) {
1248 if (policy
->family
== AF_INET
) {
1256 if (policy
->family
== AF_INET
) {
1265 if (policy
->selector
.prefixlen_d
< dbits
||
1266 policy
->selector
.prefixlen_s
< sbits
)
1269 bin
= xfrm_policy_inexact_alloc_bin(policy
, dir
);
1273 if (!xfrm_policy_inexact_alloc_chain(bin
, policy
, dir
))
1277 /* reset the bydst and inexact table in all directions */
1278 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
1279 struct hlist_node
*n
;
1281 hlist_for_each_entry_safe(policy
, n
,
1282 &net
->xfrm
.policy_inexact
[dir
],
1284 hlist_del_init(&policy
->bydst_inexact_list
);
1286 hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
1287 odst
= net
->xfrm
.policy_bydst
[dir
].table
;
1288 for (i
= hmask
; i
>= 0; i
--)
1289 INIT_HLIST_HEAD(odst
+ i
);
1290 if ((dir
& XFRM_POLICY_MASK
) == XFRM_POLICY_OUT
) {
1291 /* dir out => dst = remote, src = local */
1292 net
->xfrm
.policy_bydst
[dir
].dbits4
= rbits4
;
1293 net
->xfrm
.policy_bydst
[dir
].sbits4
= lbits4
;
1294 net
->xfrm
.policy_bydst
[dir
].dbits6
= rbits6
;
1295 net
->xfrm
.policy_bydst
[dir
].sbits6
= lbits6
;
1297 /* dir in/fwd => dst = local, src = remote */
1298 net
->xfrm
.policy_bydst
[dir
].dbits4
= lbits4
;
1299 net
->xfrm
.policy_bydst
[dir
].sbits4
= rbits4
;
1300 net
->xfrm
.policy_bydst
[dir
].dbits6
= lbits6
;
1301 net
->xfrm
.policy_bydst
[dir
].sbits6
= rbits6
;
1305 /* re-insert all policies by order of creation */
1306 list_for_each_entry_reverse(policy
, &net
->xfrm
.policy_all
, walk
.all
) {
1307 if (policy
->walk
.dead
)
1309 dir
= xfrm_policy_id2dir(policy
->index
);
1310 if (dir
>= XFRM_POLICY_MAX
) {
1311 /* skip socket policies */
1315 chain
= policy_hash_bysel(net
, &policy
->selector
,
1316 policy
->family
, dir
);
1318 hlist_del_rcu(&policy
->bydst
);
1321 void *p
= xfrm_policy_inexact_insert(policy
, dir
, 0);
1323 WARN_ONCE(IS_ERR(p
), "reinsert: %ld\n", PTR_ERR(p
));
1327 hlist_for_each_entry(pol
, chain
, bydst
) {
1328 if (policy
->priority
>= pol
->priority
)
1329 newpos
= &pol
->bydst
;
1334 hlist_add_behind_rcu(&policy
->bydst
, newpos
);
1336 hlist_add_head_rcu(&policy
->bydst
, chain
);
1340 __xfrm_policy_inexact_flush(net
);
1341 write_seqcount_end(&xfrm_policy_hash_generation
);
1342 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1344 mutex_unlock(&hash_resize_mutex
);
1347 void xfrm_policy_hash_rebuild(struct net
*net
)
1349 schedule_work(&net
->xfrm
.policy_hthresh
.work
);
1351 EXPORT_SYMBOL(xfrm_policy_hash_rebuild
);
1353 /* Generate new index... KAME seems to generate them ordered by cost
1354 * of an absolute inpredictability of ordering of rules. This will not pass. */
1355 static u32
xfrm_gen_index(struct net
*net
, int dir
, u32 index
)
1357 static u32 idx_generator
;
1360 struct hlist_head
*list
;
1361 struct xfrm_policy
*p
;
1366 idx
= (idx_generator
| dir
);
1375 list
= net
->xfrm
.policy_byidx
+ idx_hash(net
, idx
);
1377 hlist_for_each_entry(p
, list
, byidx
) {
1378 if (p
->index
== idx
) {
1388 static inline int selector_cmp(struct xfrm_selector
*s1
, struct xfrm_selector
*s2
)
1390 u32
*p1
= (u32
*) s1
;
1391 u32
*p2
= (u32
*) s2
;
1392 int len
= sizeof(struct xfrm_selector
) / sizeof(u32
);
1395 for (i
= 0; i
< len
; i
++) {
1403 static void xfrm_policy_requeue(struct xfrm_policy
*old
,
1404 struct xfrm_policy
*new)
1406 struct xfrm_policy_queue
*pq
= &old
->polq
;
1407 struct sk_buff_head list
;
1409 if (skb_queue_empty(&pq
->hold_queue
))
1412 __skb_queue_head_init(&list
);
1414 spin_lock_bh(&pq
->hold_queue
.lock
);
1415 skb_queue_splice_init(&pq
->hold_queue
, &list
);
1416 if (del_timer(&pq
->hold_timer
))
1418 spin_unlock_bh(&pq
->hold_queue
.lock
);
1422 spin_lock_bh(&pq
->hold_queue
.lock
);
1423 skb_queue_splice(&list
, &pq
->hold_queue
);
1424 pq
->timeout
= XFRM_QUEUE_TMO_MIN
;
1425 if (!mod_timer(&pq
->hold_timer
, jiffies
))
1427 spin_unlock_bh(&pq
->hold_queue
.lock
);
1430 static bool xfrm_policy_mark_match(struct xfrm_policy
*policy
,
1431 struct xfrm_policy
*pol
)
1433 u32 mark
= policy
->mark
.v
& policy
->mark
.m
;
1435 if (policy
->mark
.v
== pol
->mark
.v
&& policy
->mark
.m
== pol
->mark
.m
)
1438 if ((mark
& pol
->mark
.m
) == pol
->mark
.v
&&
1439 policy
->priority
== pol
->priority
)
1445 static u32
xfrm_pol_bin_key(const void *data
, u32 len
, u32 seed
)
1447 const struct xfrm_pol_inexact_key
*k
= data
;
1448 u32 a
= k
->type
<< 24 | k
->dir
<< 16 | k
->family
;
1450 return jhash_3words(a
, k
->if_id
, net_hash_mix(read_pnet(&k
->net
)),
1454 static u32
xfrm_pol_bin_obj(const void *data
, u32 len
, u32 seed
)
1456 const struct xfrm_pol_inexact_bin
*b
= data
;
1458 return xfrm_pol_bin_key(&b
->k
, 0, seed
);
1461 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg
*arg
,
1464 const struct xfrm_pol_inexact_key
*key
= arg
->key
;
1465 const struct xfrm_pol_inexact_bin
*b
= ptr
;
1468 if (!net_eq(read_pnet(&b
->k
.net
), read_pnet(&key
->net
)))
1471 ret
= b
->k
.dir
^ key
->dir
;
1475 ret
= b
->k
.type
^ key
->type
;
1479 ret
= b
->k
.family
^ key
->family
;
1483 return b
->k
.if_id
^ key
->if_id
;
1486 static const struct rhashtable_params xfrm_pol_inexact_params
= {
1487 .head_offset
= offsetof(struct xfrm_pol_inexact_bin
, head
),
1488 .hashfn
= xfrm_pol_bin_key
,
1489 .obj_hashfn
= xfrm_pol_bin_obj
,
1490 .obj_cmpfn
= xfrm_pol_bin_cmp
,
1491 .automatic_shrinking
= true,
1494 static void xfrm_policy_insert_inexact_list(struct hlist_head
*chain
,
1495 struct xfrm_policy
*policy
)
1497 struct xfrm_policy
*pol
, *delpol
= NULL
;
1498 struct hlist_node
*newpos
= NULL
;
1501 hlist_for_each_entry(pol
, chain
, bydst_inexact_list
) {
1502 if (pol
->type
== policy
->type
&&
1503 pol
->if_id
== policy
->if_id
&&
1504 !selector_cmp(&pol
->selector
, &policy
->selector
) &&
1505 xfrm_policy_mark_match(policy
, pol
) &&
1506 xfrm_sec_ctx_match(pol
->security
, policy
->security
) &&
1509 if (policy
->priority
> pol
->priority
)
1511 } else if (policy
->priority
>= pol
->priority
) {
1512 newpos
= &pol
->bydst_inexact_list
;
1520 hlist_add_behind_rcu(&policy
->bydst_inexact_list
, newpos
);
1522 hlist_add_head_rcu(&policy
->bydst_inexact_list
, chain
);
1524 hlist_for_each_entry(pol
, chain
, bydst_inexact_list
) {
1530 static struct xfrm_policy
*xfrm_policy_insert_list(struct hlist_head
*chain
,
1531 struct xfrm_policy
*policy
,
1534 struct xfrm_policy
*pol
, *newpos
= NULL
, *delpol
= NULL
;
1536 hlist_for_each_entry(pol
, chain
, bydst
) {
1537 if (pol
->type
== policy
->type
&&
1538 pol
->if_id
== policy
->if_id
&&
1539 !selector_cmp(&pol
->selector
, &policy
->selector
) &&
1540 xfrm_policy_mark_match(policy
, pol
) &&
1541 xfrm_sec_ctx_match(pol
->security
, policy
->security
) &&
1544 return ERR_PTR(-EEXIST
);
1546 if (policy
->priority
> pol
->priority
)
1548 } else if (policy
->priority
>= pol
->priority
) {
1557 hlist_add_behind_rcu(&policy
->bydst
, &newpos
->bydst
);
1559 hlist_add_head_rcu(&policy
->bydst
, chain
);
1564 int xfrm_policy_insert(int dir
, struct xfrm_policy
*policy
, int excl
)
1566 struct net
*net
= xp_net(policy
);
1567 struct xfrm_policy
*delpol
;
1568 struct hlist_head
*chain
;
1570 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1571 chain
= policy_hash_bysel(net
, &policy
->selector
, policy
->family
, dir
);
1573 delpol
= xfrm_policy_insert_list(chain
, policy
, excl
);
1575 delpol
= xfrm_policy_inexact_insert(policy
, dir
, excl
);
1577 if (IS_ERR(delpol
)) {
1578 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1579 return PTR_ERR(delpol
);
1582 __xfrm_policy_link(policy
, dir
);
1584 /* After previous checking, family can either be AF_INET or AF_INET6 */
1585 if (policy
->family
== AF_INET
)
1586 rt_genid_bump_ipv4(net
);
1588 rt_genid_bump_ipv6(net
);
1591 xfrm_policy_requeue(delpol
, policy
);
1592 __xfrm_policy_unlink(delpol
, dir
);
1594 policy
->index
= delpol
? delpol
->index
: xfrm_gen_index(net
, dir
, policy
->index
);
1595 hlist_add_head(&policy
->byidx
, net
->xfrm
.policy_byidx
+idx_hash(net
, policy
->index
));
1596 policy
->curlft
.add_time
= ktime_get_real_seconds();
1597 policy
->curlft
.use_time
= 0;
1598 if (!mod_timer(&policy
->timer
, jiffies
+ HZ
))
1599 xfrm_pol_hold(policy
);
1600 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1603 xfrm_policy_kill(delpol
);
1604 else if (xfrm_bydst_should_resize(net
, dir
, NULL
))
1605 schedule_work(&net
->xfrm
.policy_hash_work
);
1609 EXPORT_SYMBOL(xfrm_policy_insert
);
1611 static struct xfrm_policy
*
1612 __xfrm_policy_bysel_ctx(struct hlist_head
*chain
, u32 mark
, u32 if_id
,
1614 struct xfrm_selector
*sel
,
1615 struct xfrm_sec_ctx
*ctx
)
1617 struct xfrm_policy
*pol
;
1622 hlist_for_each_entry(pol
, chain
, bydst
) {
1623 if (pol
->type
== type
&&
1624 pol
->if_id
== if_id
&&
1625 (mark
& pol
->mark
.m
) == pol
->mark
.v
&&
1626 !selector_cmp(sel
, &pol
->selector
) &&
1627 xfrm_sec_ctx_match(ctx
, pol
->security
))
1634 struct xfrm_policy
*xfrm_policy_bysel_ctx(struct net
*net
, u32 mark
, u32 if_id
,
1636 struct xfrm_selector
*sel
,
1637 struct xfrm_sec_ctx
*ctx
, int delete,
1640 struct xfrm_pol_inexact_bin
*bin
= NULL
;
1641 struct xfrm_policy
*pol
, *ret
= NULL
;
1642 struct hlist_head
*chain
;
1645 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1646 chain
= policy_hash_bysel(net
, sel
, sel
->family
, dir
);
1648 struct xfrm_pol_inexact_candidates cand
;
1651 bin
= xfrm_policy_inexact_lookup(net
, type
,
1652 sel
->family
, dir
, if_id
);
1654 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1658 if (!xfrm_policy_find_inexact_candidates(&cand
, bin
,
1661 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1666 for (i
= 0; i
< ARRAY_SIZE(cand
.res
); i
++) {
1667 struct xfrm_policy
*tmp
;
1669 tmp
= __xfrm_policy_bysel_ctx(cand
.res
[i
], mark
,
1675 if (!pol
|| tmp
->pos
< pol
->pos
)
1679 pol
= __xfrm_policy_bysel_ctx(chain
, mark
, if_id
, type
, dir
,
1686 *err
= security_xfrm_policy_delete(pol
->security
);
1688 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1691 __xfrm_policy_unlink(pol
, dir
);
1695 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1698 xfrm_policy_kill(ret
);
1700 xfrm_policy_inexact_prune_bin(bin
);
1703 EXPORT_SYMBOL(xfrm_policy_bysel_ctx
);
1705 struct xfrm_policy
*xfrm_policy_byid(struct net
*net
, u32 mark
, u32 if_id
,
1706 u8 type
, int dir
, u32 id
, int delete,
1709 struct xfrm_policy
*pol
, *ret
;
1710 struct hlist_head
*chain
;
1713 if (xfrm_policy_id2dir(id
) != dir
)
1717 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1718 chain
= net
->xfrm
.policy_byidx
+ idx_hash(net
, id
);
1720 hlist_for_each_entry(pol
, chain
, byidx
) {
1721 if (pol
->type
== type
&& pol
->index
== id
&&
1722 pol
->if_id
== if_id
&&
1723 (mark
& pol
->mark
.m
) == pol
->mark
.v
) {
1726 *err
= security_xfrm_policy_delete(
1729 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1732 __xfrm_policy_unlink(pol
, dir
);
1738 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1741 xfrm_policy_kill(ret
);
1744 EXPORT_SYMBOL(xfrm_policy_byid
);
1746 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1748 xfrm_policy_flush_secctx_check(struct net
*net
, u8 type
, bool task_valid
)
1750 struct xfrm_policy
*pol
;
1753 list_for_each_entry(pol
, &net
->xfrm
.policy_all
, walk
.all
) {
1754 if (pol
->walk
.dead
||
1755 xfrm_policy_id2dir(pol
->index
) >= XFRM_POLICY_MAX
||
1759 err
= security_xfrm_policy_delete(pol
->security
);
1761 xfrm_audit_policy_delete(pol
, 0, task_valid
);
1769 xfrm_policy_flush_secctx_check(struct net
*net
, u8 type
, bool task_valid
)
1775 int xfrm_policy_flush(struct net
*net
, u8 type
, bool task_valid
)
1777 int dir
, err
= 0, cnt
= 0;
1778 struct xfrm_policy
*pol
;
1780 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1782 err
= xfrm_policy_flush_secctx_check(net
, type
, task_valid
);
1787 list_for_each_entry(pol
, &net
->xfrm
.policy_all
, walk
.all
) {
1788 dir
= xfrm_policy_id2dir(pol
->index
);
1789 if (pol
->walk
.dead
||
1790 dir
>= XFRM_POLICY_MAX
||
1794 __xfrm_policy_unlink(pol
, dir
);
1795 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1797 xfrm_audit_policy_delete(pol
, 1, task_valid
);
1798 xfrm_policy_kill(pol
);
1799 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1803 __xfrm_policy_inexact_flush(net
);
1807 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1810 EXPORT_SYMBOL(xfrm_policy_flush
);
1812 int xfrm_policy_walk(struct net
*net
, struct xfrm_policy_walk
*walk
,
1813 int (*func
)(struct xfrm_policy
*, int, int, void*),
1816 struct xfrm_policy
*pol
;
1817 struct xfrm_policy_walk_entry
*x
;
1820 if (walk
->type
>= XFRM_POLICY_TYPE_MAX
&&
1821 walk
->type
!= XFRM_POLICY_TYPE_ANY
)
1824 if (list_empty(&walk
->walk
.all
) && walk
->seq
!= 0)
1827 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1828 if (list_empty(&walk
->walk
.all
))
1829 x
= list_first_entry(&net
->xfrm
.policy_all
, struct xfrm_policy_walk_entry
, all
);
1831 x
= list_first_entry(&walk
->walk
.all
,
1832 struct xfrm_policy_walk_entry
, all
);
1834 list_for_each_entry_from(x
, &net
->xfrm
.policy_all
, all
) {
1837 pol
= container_of(x
, struct xfrm_policy
, walk
);
1838 if (walk
->type
!= XFRM_POLICY_TYPE_ANY
&&
1839 walk
->type
!= pol
->type
)
1841 error
= func(pol
, xfrm_policy_id2dir(pol
->index
),
1844 list_move_tail(&walk
->walk
.all
, &x
->all
);
1849 if (walk
->seq
== 0) {
1853 list_del_init(&walk
->walk
.all
);
1855 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1858 EXPORT_SYMBOL(xfrm_policy_walk
);
1860 void xfrm_policy_walk_init(struct xfrm_policy_walk
*walk
, u8 type
)
1862 INIT_LIST_HEAD(&walk
->walk
.all
);
1863 walk
->walk
.dead
= 1;
1867 EXPORT_SYMBOL(xfrm_policy_walk_init
);
1869 void xfrm_policy_walk_done(struct xfrm_policy_walk
*walk
, struct net
*net
)
1871 if (list_empty(&walk
->walk
.all
))
1874 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
); /*FIXME where is net? */
1875 list_del(&walk
->walk
.all
);
1876 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1878 EXPORT_SYMBOL(xfrm_policy_walk_done
);
1881 * Find policy to apply to this flow.
1883 * Returns 0 if policy found, else an -errno.
1885 static int xfrm_policy_match(const struct xfrm_policy
*pol
,
1886 const struct flowi
*fl
,
1887 u8 type
, u16 family
, int dir
, u32 if_id
)
1889 const struct xfrm_selector
*sel
= &pol
->selector
;
1893 if (pol
->family
!= family
||
1894 pol
->if_id
!= if_id
||
1895 (fl
->flowi_mark
& pol
->mark
.m
) != pol
->mark
.v
||
1899 match
= xfrm_selector_match(sel
, fl
, family
);
1901 ret
= security_xfrm_policy_lookup(pol
->security
, fl
->flowi_secid
,
1906 static struct xfrm_pol_inexact_node
*
1907 xfrm_policy_lookup_inexact_addr(const struct rb_root
*r
,
1909 const xfrm_address_t
*addr
, u16 family
)
1911 const struct rb_node
*parent
;
1915 seq
= read_seqcount_begin(count
);
1917 parent
= rcu_dereference_raw(r
->rb_node
);
1919 struct xfrm_pol_inexact_node
*node
;
1922 node
= rb_entry(parent
, struct xfrm_pol_inexact_node
, node
);
1924 delta
= xfrm_policy_addr_delta(addr
, &node
->addr
,
1925 node
->prefixlen
, family
);
1927 parent
= rcu_dereference_raw(parent
->rb_left
);
1929 } else if (delta
> 0) {
1930 parent
= rcu_dereference_raw(parent
->rb_right
);
1937 if (read_seqcount_retry(count
, seq
))
1944 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates
*cand
,
1945 struct xfrm_pol_inexact_bin
*b
,
1946 const xfrm_address_t
*saddr
,
1947 const xfrm_address_t
*daddr
)
1949 struct xfrm_pol_inexact_node
*n
;
1955 family
= b
->k
.family
;
1956 memset(cand
, 0, sizeof(*cand
));
1957 cand
->res
[XFRM_POL_CAND_ANY
] = &b
->hhead
;
1959 n
= xfrm_policy_lookup_inexact_addr(&b
->root_d
, &b
->count
, daddr
,
1962 cand
->res
[XFRM_POL_CAND_DADDR
] = &n
->hhead
;
1963 n
= xfrm_policy_lookup_inexact_addr(&n
->root
, &b
->count
, saddr
,
1966 cand
->res
[XFRM_POL_CAND_BOTH
] = &n
->hhead
;
1969 n
= xfrm_policy_lookup_inexact_addr(&b
->root_s
, &b
->count
, saddr
,
1972 cand
->res
[XFRM_POL_CAND_SADDR
] = &n
->hhead
;
1977 static struct xfrm_pol_inexact_bin
*
1978 xfrm_policy_inexact_lookup_rcu(struct net
*net
, u8 type
, u16 family
,
1981 struct xfrm_pol_inexact_key k
= {
1988 write_pnet(&k
.net
, net
);
1990 return rhashtable_lookup(&xfrm_policy_inexact_table
, &k
,
1991 xfrm_pol_inexact_params
);
1994 static struct xfrm_pol_inexact_bin
*
1995 xfrm_policy_inexact_lookup(struct net
*net
, u8 type
, u16 family
,
1998 struct xfrm_pol_inexact_bin
*bin
;
2000 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
2003 bin
= xfrm_policy_inexact_lookup_rcu(net
, type
, family
, dir
, if_id
);
2009 static struct xfrm_policy
*
2010 __xfrm_policy_eval_candidates(struct hlist_head
*chain
,
2011 struct xfrm_policy
*prefer
,
2012 const struct flowi
*fl
,
2013 u8 type
, u16 family
, int dir
, u32 if_id
)
2015 u32 priority
= prefer
? prefer
->priority
: ~0u;
2016 struct xfrm_policy
*pol
;
2021 hlist_for_each_entry_rcu(pol
, chain
, bydst
) {
2024 if (pol
->priority
> priority
)
2027 err
= xfrm_policy_match(pol
, fl
, type
, family
, dir
, if_id
);
2030 return ERR_PTR(err
);
2036 /* matches. Is it older than *prefer? */
2037 if (pol
->priority
== priority
&&
2038 prefer
->pos
< pol
->pos
)
2048 static struct xfrm_policy
*
2049 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates
*cand
,
2050 struct xfrm_policy
*prefer
,
2051 const struct flowi
*fl
,
2052 u8 type
, u16 family
, int dir
, u32 if_id
)
2054 struct xfrm_policy
*tmp
;
2057 for (i
= 0; i
< ARRAY_SIZE(cand
->res
); i
++) {
2058 tmp
= __xfrm_policy_eval_candidates(cand
->res
[i
],
2060 fl
, type
, family
, dir
,
2073 static struct xfrm_policy
*xfrm_policy_lookup_bytype(struct net
*net
, u8 type
,
2074 const struct flowi
*fl
,
2078 struct xfrm_pol_inexact_candidates cand
;
2079 const xfrm_address_t
*daddr
, *saddr
;
2080 struct xfrm_pol_inexact_bin
*bin
;
2081 struct xfrm_policy
*pol
, *ret
;
2082 struct hlist_head
*chain
;
2083 unsigned int sequence
;
2086 daddr
= xfrm_flowi_daddr(fl
, family
);
2087 saddr
= xfrm_flowi_saddr(fl
, family
);
2088 if (unlikely(!daddr
|| !saddr
))
2094 sequence
= read_seqcount_begin(&xfrm_policy_hash_generation
);
2095 chain
= policy_hash_direct(net
, daddr
, saddr
, family
, dir
);
2096 } while (read_seqcount_retry(&xfrm_policy_hash_generation
, sequence
));
2099 hlist_for_each_entry_rcu(pol
, chain
, bydst
) {
2100 err
= xfrm_policy_match(pol
, fl
, type
, family
, dir
, if_id
);
2113 bin
= xfrm_policy_inexact_lookup_rcu(net
, type
, family
, dir
, if_id
);
2114 if (!bin
|| !xfrm_policy_find_inexact_candidates(&cand
, bin
, saddr
,
2118 pol
= xfrm_policy_eval_candidates(&cand
, ret
, fl
, type
,
2119 family
, dir
, if_id
);
2127 if (read_seqcount_retry(&xfrm_policy_hash_generation
, sequence
))
2130 if (ret
&& !xfrm_pol_hold_rcu(ret
))
2138 static struct xfrm_policy
*xfrm_policy_lookup(struct net
*net
,
2139 const struct flowi
*fl
,
2140 u16 family
, u8 dir
, u32 if_id
)
2142 #ifdef CONFIG_XFRM_SUB_POLICY
2143 struct xfrm_policy
*pol
;
2145 pol
= xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_SUB
, fl
, family
,
2150 return xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_MAIN
, fl
, family
,
2154 static struct xfrm_policy
*xfrm_sk_policy_lookup(const struct sock
*sk
, int dir
,
2155 const struct flowi
*fl
,
2156 u16 family
, u32 if_id
)
2158 struct xfrm_policy
*pol
;
2162 pol
= rcu_dereference(sk
->sk_policy
[dir
]);
2167 if (pol
->family
!= family
) {
2172 match
= xfrm_selector_match(&pol
->selector
, fl
, family
);
2174 if ((sk
->sk_mark
& pol
->mark
.m
) != pol
->mark
.v
||
2175 pol
->if_id
!= if_id
) {
2179 err
= security_xfrm_policy_lookup(pol
->security
,
2183 if (!xfrm_pol_hold_rcu(pol
))
2185 } else if (err
== -ESRCH
) {
2198 static void __xfrm_policy_link(struct xfrm_policy
*pol
, int dir
)
2200 struct net
*net
= xp_net(pol
);
2202 list_add(&pol
->walk
.all
, &net
->xfrm
.policy_all
);
2203 net
->xfrm
.policy_count
[dir
]++;
2207 static struct xfrm_policy
*__xfrm_policy_unlink(struct xfrm_policy
*pol
,
2210 struct net
*net
= xp_net(pol
);
2212 if (list_empty(&pol
->walk
.all
))
2215 /* Socket policies are not hashed. */
2216 if (!hlist_unhashed(&pol
->bydst
)) {
2217 hlist_del_rcu(&pol
->bydst
);
2218 hlist_del_init(&pol
->bydst_inexact_list
);
2219 hlist_del(&pol
->byidx
);
2222 list_del_init(&pol
->walk
.all
);
2223 net
->xfrm
.policy_count
[dir
]--;
2228 static void xfrm_sk_policy_link(struct xfrm_policy
*pol
, int dir
)
2230 __xfrm_policy_link(pol
, XFRM_POLICY_MAX
+ dir
);
2233 static void xfrm_sk_policy_unlink(struct xfrm_policy
*pol
, int dir
)
2235 __xfrm_policy_unlink(pol
, XFRM_POLICY_MAX
+ dir
);
2238 int xfrm_policy_delete(struct xfrm_policy
*pol
, int dir
)
2240 struct net
*net
= xp_net(pol
);
2242 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
2243 pol
= __xfrm_policy_unlink(pol
, dir
);
2244 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
2246 xfrm_policy_kill(pol
);
2251 EXPORT_SYMBOL(xfrm_policy_delete
);
2253 int xfrm_sk_policy_insert(struct sock
*sk
, int dir
, struct xfrm_policy
*pol
)
2255 struct net
*net
= sock_net(sk
);
2256 struct xfrm_policy
*old_pol
;
2258 #ifdef CONFIG_XFRM_SUB_POLICY
2259 if (pol
&& pol
->type
!= XFRM_POLICY_TYPE_MAIN
)
2263 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
2264 old_pol
= rcu_dereference_protected(sk
->sk_policy
[dir
],
2265 lockdep_is_held(&net
->xfrm
.xfrm_policy_lock
));
2267 pol
->curlft
.add_time
= ktime_get_real_seconds();
2268 pol
->index
= xfrm_gen_index(net
, XFRM_POLICY_MAX
+dir
, 0);
2269 xfrm_sk_policy_link(pol
, dir
);
2271 rcu_assign_pointer(sk
->sk_policy
[dir
], pol
);
2274 xfrm_policy_requeue(old_pol
, pol
);
2276 /* Unlinking succeeds always. This is the only function
2277 * allowed to delete or replace socket policy.
2279 xfrm_sk_policy_unlink(old_pol
, dir
);
2281 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
2284 xfrm_policy_kill(old_pol
);
2289 static struct xfrm_policy
*clone_policy(const struct xfrm_policy
*old
, int dir
)
2291 struct xfrm_policy
*newp
= xfrm_policy_alloc(xp_net(old
), GFP_ATOMIC
);
2292 struct net
*net
= xp_net(old
);
2295 newp
->selector
= old
->selector
;
2296 if (security_xfrm_policy_clone(old
->security
,
2299 return NULL
; /* ENOMEM */
2301 newp
->lft
= old
->lft
;
2302 newp
->curlft
= old
->curlft
;
2303 newp
->mark
= old
->mark
;
2304 newp
->if_id
= old
->if_id
;
2305 newp
->action
= old
->action
;
2306 newp
->flags
= old
->flags
;
2307 newp
->xfrm_nr
= old
->xfrm_nr
;
2308 newp
->index
= old
->index
;
2309 newp
->type
= old
->type
;
2310 newp
->family
= old
->family
;
2311 memcpy(newp
->xfrm_vec
, old
->xfrm_vec
,
2312 newp
->xfrm_nr
*sizeof(struct xfrm_tmpl
));
2313 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
2314 xfrm_sk_policy_link(newp
, dir
);
2315 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
2321 int __xfrm_sk_clone_policy(struct sock
*sk
, const struct sock
*osk
)
2323 const struct xfrm_policy
*p
;
2324 struct xfrm_policy
*np
;
2328 for (i
= 0; i
< 2; i
++) {
2329 p
= rcu_dereference(osk
->sk_policy
[i
]);
2331 np
= clone_policy(p
, i
);
2332 if (unlikely(!np
)) {
2336 rcu_assign_pointer(sk
->sk_policy
[i
], np
);
2344 xfrm_get_saddr(struct net
*net
, int oif
, xfrm_address_t
*local
,
2345 xfrm_address_t
*remote
, unsigned short family
, u32 mark
)
2348 const struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
2350 if (unlikely(afinfo
== NULL
))
2352 err
= afinfo
->get_saddr(net
, oif
, local
, remote
, mark
);
2357 /* Resolve list of templates for the flow, given policy. */
2360 xfrm_tmpl_resolve_one(struct xfrm_policy
*policy
, const struct flowi
*fl
,
2361 struct xfrm_state
**xfrm
, unsigned short family
)
2363 struct net
*net
= xp_net(policy
);
2366 xfrm_address_t
*daddr
= xfrm_flowi_daddr(fl
, family
);
2367 xfrm_address_t
*saddr
= xfrm_flowi_saddr(fl
, family
);
2370 for (nx
= 0, i
= 0; i
< policy
->xfrm_nr
; i
++) {
2371 struct xfrm_state
*x
;
2372 xfrm_address_t
*remote
= daddr
;
2373 xfrm_address_t
*local
= saddr
;
2374 struct xfrm_tmpl
*tmpl
= &policy
->xfrm_vec
[i
];
2376 if (tmpl
->mode
== XFRM_MODE_TUNNEL
||
2377 tmpl
->mode
== XFRM_MODE_BEET
) {
2378 remote
= &tmpl
->id
.daddr
;
2379 local
= &tmpl
->saddr
;
2380 if (xfrm_addr_any(local
, tmpl
->encap_family
)) {
2381 error
= xfrm_get_saddr(net
, fl
->flowi_oif
,
2383 tmpl
->encap_family
, 0);
2390 x
= xfrm_state_find(remote
, local
, fl
, tmpl
, policy
, &error
,
2391 family
, policy
->if_id
);
2393 if (x
&& x
->km
.state
== XFRM_STATE_VALID
) {
2400 error
= (x
->km
.state
== XFRM_STATE_ERROR
?
2403 } else if (error
== -ESRCH
) {
2407 if (!tmpl
->optional
)
2413 for (nx
--; nx
>= 0; nx
--)
2414 xfrm_state_put(xfrm
[nx
]);
2419 xfrm_tmpl_resolve(struct xfrm_policy
**pols
, int npols
, const struct flowi
*fl
,
2420 struct xfrm_state
**xfrm
, unsigned short family
)
2422 struct xfrm_state
*tp
[XFRM_MAX_DEPTH
];
2423 struct xfrm_state
**tpp
= (npols
> 1) ? tp
: xfrm
;
2429 for (i
= 0; i
< npols
; i
++) {
2430 if (cnx
+ pols
[i
]->xfrm_nr
>= XFRM_MAX_DEPTH
) {
2435 ret
= xfrm_tmpl_resolve_one(pols
[i
], fl
, &tpp
[cnx
], family
);
2443 /* found states are sorted for outbound processing */
2445 xfrm_state_sort(xfrm
, tpp
, cnx
, family
);
2450 for (cnx
--; cnx
>= 0; cnx
--)
2451 xfrm_state_put(tpp
[cnx
]);
2456 static int xfrm_get_tos(const struct flowi
*fl
, int family
)
2458 if (family
== AF_INET
)
2459 return IPTOS_RT_MASK
& fl
->u
.ip4
.flowi4_tos
;
2464 static inline struct xfrm_dst
*xfrm_alloc_dst(struct net
*net
, int family
)
2466 const struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
2467 struct dst_ops
*dst_ops
;
2468 struct xfrm_dst
*xdst
;
2471 return ERR_PTR(-EINVAL
);
2475 dst_ops
= &net
->xfrm
.xfrm4_dst_ops
;
2477 #if IS_ENABLED(CONFIG_IPV6)
2479 dst_ops
= &net
->xfrm
.xfrm6_dst_ops
;
2485 xdst
= dst_alloc(dst_ops
, NULL
, 1, DST_OBSOLETE_NONE
, 0);
2488 struct dst_entry
*dst
= &xdst
->u
.dst
;
2490 memset(dst
+ 1, 0, sizeof(*xdst
) - sizeof(*dst
));
2492 xdst
= ERR_PTR(-ENOBUFS
);
2499 static void xfrm_init_path(struct xfrm_dst
*path
, struct dst_entry
*dst
,
2502 if (dst
->ops
->family
== AF_INET6
) {
2503 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
2504 path
->path_cookie
= rt6_get_cookie(rt
);
2505 path
->u
.rt6
.rt6i_nfheader_len
= nfheader_len
;
2509 static inline int xfrm_fill_dst(struct xfrm_dst
*xdst
, struct net_device
*dev
,
2510 const struct flowi
*fl
)
2512 const struct xfrm_policy_afinfo
*afinfo
=
2513 xfrm_policy_get_afinfo(xdst
->u
.dst
.ops
->family
);
2519 err
= afinfo
->fill_dst(xdst
, dev
, fl
);
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	const struct xfrm_state_afinfo *afinfo;
	const struct xfrm_mode *inner_mode;
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		bundle[i] = xdst;
		if (!xdst_prev)
			xdst0 = xdst;
		else
			/* Ref count is taken during xfrm_alloc_dst()
			 * No need to do dst_clone() on dst1
			 */
			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else {
			inner_mode = &xfrm[i]->inner_mode;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			__u32 mark = 0;

			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);

			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
					      &saddr, &daddr, family, mark);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else {
			dst_hold(dst);
		}

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;

		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
		if (likely(afinfo))
			dst1->output = afinfo->output;
		else
			dst1->output = dst_discard_out;
		rcu_read_unlock();

		xdst_prev = xdst;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	xfrm_dst_set_child(xdst_prev, dst);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path(xdst0, dst, nfheader_len);
	xfrm_init_pmtu(bundle, nx);

	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);
		if (err)
			goto free_dst;

		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
	}

	return &xdst0->u.dst;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (xdst0)
		dst_release_immediate(&xdst0->u.dst);

	return ERR_PTR(err);
}
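/* Expand the looked-up policy into the full set that applies to this flow
 * (the main policy plus an optional sub-policy) and add up the number of
 * templates; *num_xfrms is set negative when one of the policies blocks
 * the flow.
 */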
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT,
						    pols[0]->if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct xfrm_dst *xdst;
	struct dst_entry *dst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err == 0)
			return NULL;

		if (err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
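/* Hold-queue timer callback: packets queued while SAs were still being
 * negotiated are re-resolved here and either transmitted, re-queued with an
 * exponentially backed-off timer, or purged if resolution keeps failing.
 */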
static void xfrm_policy_queue_process(struct timer_list *t)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(xfrm_dst_path(dst));
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
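/* Output handler installed on dummy bundles: instead of transmitting, park
 * the skb on the policy's hold queue and (re)arm the hold timer so
 * xfrm_policy_queue_process() can retry once states become available.
 */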
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
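/* Build a state-less bundle for a flow whose policies matched but whose
 * states are not resolved yet: it either returns a bare bundle that the
 * caller treats as unresolved, or wires up xdst_queue_output() so packets
 * are held until the key manager installs the missing states.
 */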
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms, u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	xfrm_dst_set_child(xdst, dst);

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
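/* Resolve policies for a flow straight from the policy database and try to
 * build a bundle for them; when templates cannot be resolved yet, fall back
 * to a dummy bundle so packets can be queued instead of dropped.
 */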
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					   const struct flowi *fl,
					   u16 family, u8 dir,
					   struct xfrm_flo *xflo, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	num_pols = 1;
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	rcu_read_unlock();

	return ret;
}
/* Finds/creates a bundle for given flow and if_id
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);

			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				if (err == -EREMOTE)
					goto nopol;

				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
		if (xdst == NULL)
			goto nopol;
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
			goto dropdst;
		}

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = ktime_get_real_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup_with_ifid);
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags)
{
	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
}
EXPORT_SYMBOL(xfrm_lookup);
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
				    const struct sock *sk, int flags)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);

	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

	if (IS_ERR(dst))
		dst_release(dst_orig);

	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
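/* A typical output-path caller, sketched only (the real call sites live in
 * the per-family routing code, e.g. IPv4's ip_route_output_flow()):
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, flowi4_to_flowi(fl4), sk, 0);
 *	if (IS_ERR(dst))
 *		return ERR_CAST(dst);
 *
 * The caller is then expected to hand the packet to dst_output() eventually,
 * as noted in the comment above.
 */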
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct sec_path *sp = skb_sec_path(skb);
	struct xfrm_state *x;

	if (!sp || idx < 0 || idx >= sp->len)
		return 0;
	x = sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * 0 or more than 0 is returned when validation is succeeded (either bypass
 * because of optional transport mode, or next index of the matched secpath
 * state with the template.
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */

static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-(long)sp->xvec[idx];
			break;
		}
	}
	return start;
}
static void
decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl;
	u8 *xprth = skb_network_header(skb) + ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;

	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + ihl * 4;
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ehdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;
		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ah_hdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;
		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
}
#if IS_ENABLED(CONFIG_IPV6)
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	int onlyproto = 0;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u32 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	while (nh + offset + sizeof(*exthdr) < skb->data ||
	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			onlyproto = 1;
			/* fall through */
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
			break;

		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;

		case IPPROTO_ICMPV6:
			if (!onlyproto && (nh + offset + 2 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && (nh + offset + 3 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif

		/* XXX Why are there these headers? */
		case IPPROTO_AH:
		case IPPROTO_ESP:
		case IPPROTO_COMP:
		default:
			fl6->fl6_ipsec_spi = 0;
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
#endif
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
		return -EAFNOSUPPORT;
	}

	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);
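/* Return 1 (and report the offending index) if any state in the secpath
 * from position k onwards is a non-transport-mode state, i.e. one that a
 * policy template still has to account for.
 */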
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	int xerr_idx = -1;
	const struct xfrm_if_cb *ifcb;
	struct sec_path *sp;
	struct xfrm_if *xi;
	u32 if_id = 0;

	rcu_read_lock();
	ifcb = xfrm_if_get_cb();

	if (ifcb) {
		xi = ifcb->decode_session(skb, family);
		if (xi) {
			if_id = xi->p.if_id;
			net = xi->net;
		}
	}
	rcu_read_unlock();

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	sp = skb_sec_path(skb);
	if (sp) {
		int i;

		for (i = sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = ktime_get_real_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN, if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = ktime_get_real_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		sp = skb_sec_path(skb);
		if (!sp)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
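/* Forward-path hook: decode the flow, run the output policy lookup for it
 * and replace the skb's route with the resulting bundle; returns 0 when the
 * packet must be dropped.
 */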
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);
	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
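/* Walk a freshly built bundle and initialise the cached child and route
 * MTUs, setting each level's effective path MTU to the smaller of the two.
 */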
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}

	return daddr;
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}
static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};
void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	seqcount_init(&xfrm_policy_hash_generation);

	RCU_INIT_POINTER(xfrm_if_cb, NULL);
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);