// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_INET_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100
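
/* The hold-queue timer starts at XFRM_QUEUE_TMO_MIN and doubles on each
 * expiry until it would exceed XFRM_QUEUE_TMO_MAX (see
 * xfrm_policy_queue_process() below).  As an illustration, with HZ == 100
 * the retry intervals are roughly 0.1s, 0.2s, 0.4s, ... capped at 60s.
 */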

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have the same priority, the
 * youngest one wins.
 */
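
/* Worked example with hypothetical policies: given an any:any policy P1,
 * an any:192.0.2.0/24 policy P2 and a 10.0.0.0/8:192.0.2.0/24 policy P3
 * in one bin, a lookup for saddr 10.1.2.3 / daddr 192.0.2.5 collects P1
 * from the bin's hhead, P2 from the matching daddr tree node and P3 from
 * that node's saddr subtree, then picks the candidate with the lowest
 * priority.
 */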

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
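
/* Example with a hypothetical selector: with sel->dport = htons(80) and
 * sel->dport_mask = htons(0xffff), the dport term above is zero only when
 * the flow's destination port is exactly 80; a mask of zero matches any
 * port, mirroring how a zero sel->proto matches any protocol.
 */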

static bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}
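
/* Illustration with hypothetical thresholds: if dbits4 is 24 and sbits4 is
 * 16, only IPv4 selectors at least as specific as daddr/24 and saddr/16
 * are hashed into the bydst tables below; anything coarser falls back to
 * the inexact lists and trees above.
 */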

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
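
/* Example: an old mask of 15 (16 buckets) becomes ((15 + 1) << 1) - 1 = 31,
 * i.e. every resize doubles the table to the next power-of-two size.
 */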

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_init(&bin->count);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (sizeof(long) == 4 && prefixlen == 0)
			return ntohl(a->a4) - ntohl(b->a4);
		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			u32 mask = ~0u << (32 - pbi);

			delta = (ntohl(a->a6[pdw]) & mask) -
				(ntohl(b->a6[pdw]) & mask);
		}
		break;
	default:
		break;
	}

	return delta;
}
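
/* Example: for AF_INET, a = 10.0.1.2 and b = 10.0.200.200 compared with
 * prefixlen = 16 only look at the top 16 bits (10.0), so the delta is 0
 * and both addresses sort into the same tree node.
 */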

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);
		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
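
/* The low bits of an index encode the direction, so indices are handed out
 * in steps of 8: the first XFRM_POLICY_OUT (dir 1) policy may, for example,
 * get index 1 and the next one 9, and xfrm_policy_id2dir() simply masks
 * the direction back out.
 */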

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}
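
/* Example with hypothetical marks: a policy with mark.v = 0x1, mark.m = 0xff
 * matches an existing pol with the identical v/m pair, or (second test) one
 * with mark.v = 0x1, mark.m = 0x0f at equal priority, since 0x1 & 0x0f == 0x1;
 * with differing priorities the second test fails.
 */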

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};
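
/* Bins are therefore keyed purely on (net, dir, type, family, if_id):
 * xfrm_pol_bin_cmp() returns 0 only when all five fields match, so each
 * distinct combination gets its own bin with its own trees and lists.
 */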

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
			u8 type, int dir,
			struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
					  u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
				     u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);
	return ret;
}

static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}

static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
			      struct xfrm_policy *prefer,
			      const struct flowi *fl,
			      u8 type, u16 family, int dir, u32 if_id)
{
	u32 priority = prefer ? prefer->priority : ~0u;
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry_rcu(pol, chain, bydst) {
		int err;

		if (pol->priority > priority)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err != -ESRCH)
				return ERR_PTR(err);

			continue;
		}

		if (prefer) {
			/* matches.  Is it older than *prefer? */
			if (pol->priority == priority &&
			    prefer->pos < pol->pos)
				return prefer;
		}

		return pol;
	}

	return NULL;
}

static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
			    struct xfrm_policy *prefer,
			    const struct flowi *fl,
			    u8 type, u16 family, int dir, u32 if_id)
{
	struct xfrm_policy *tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
		tmp = __xfrm_policy_eval_candidates(cand->res[i],
						    prefer,
						    fl, type, family, dir,
						    if_id);
		if (!tmp)
			continue;

		if (IS_ERR(tmp))
			return tmp;
		prefer = tmp;
	}

	return prefer;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir,
						     u32 if_id)
{
	struct xfrm_pol_inexact_candidates cand;
	const xfrm_address_t *daddr, *saddr;
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	unsigned int sequence;
	int err;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
retry:
	do {
		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));

	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
							 daddr))
		goto skip_inexact;

	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
					  family, dir, if_id);
	if (pol) {
		ret = pol;
		if (IS_ERR(pol))
			goto fail;
	}

skip_inexact:
	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
		goto retry;

	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
					      const struct flowi *fl,
					      u16 family, u8 dir, u32 if_id)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
					dir, if_id);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
					 dir, if_id);
}
*xfrm_sk_policy_lookup(const struct sock
*sk
, int dir
,
2159 const struct flowi
*fl
,
2160 u16 family
, u32 if_id
)
2162 struct xfrm_policy
*pol
;
2166 pol
= rcu_dereference(sk
->sk_policy
[dir
]);
2171 if (pol
->family
!= family
) {
2176 match
= xfrm_selector_match(&pol
->selector
, fl
, family
);
2178 if ((sk
->sk_mark
& pol
->mark
.m
) != pol
->mark
.v
||
2179 pol
->if_id
!= if_id
) {
2183 err
= security_xfrm_policy_lookup(pol
->security
,
2187 if (!xfrm_pol_hold_rcu(pol
))
2189 } else if (err
== -ESRCH
) {

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (list_empty(&pol->walk.all))
		return NULL;

	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del_rcu(&pol->bydst);
		hlist_del_init(&pol->bydst_inexact_list);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = sock_net(sk);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
	if (pol) {
		pol->curlft.add_time = ktime_get_real_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
		xfrm_sk_policy_link(pol, dir);
	}
	rcu_assign_pointer(sk->sk_policy[dir], pol);
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		xfrm_sk_policy_unlink(old_pol, dir);
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (old_pol)
		xfrm_policy_kill(old_pol);
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
	struct net *net = xp_net(old);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->if_id = old->if_id;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		newp->family = old->family;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
}

static int
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
	       xfrm_address_t *remote, unsigned short family, u32 mark)
{
	int err;
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, oif, local, remote, mark);
	rcu_read_unlock();
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, fl->flowi_oif,
						       &tmp, remote,
						       tmpl->encap_family, 0);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
				    family, policy->if_id);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH) {
			error = -EAGAIN;
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);

	return error;
}

static int xfrm_get_tos(const struct flowi *fl, int family)
{
	if (family == AF_INET)
		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;

	return 0;
}

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
	} else
		xdst = ERR_PTR(-ENOBUFS);

	rcu_read_unlock();

	return xdst;
}

static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	if (dst->ops->family == AF_INET6) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		path->path_cookie = rt6_get_cookie(rt);
		path->u.rt6.rt6i_nfheader_len = nfheader_len;
	}
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	const struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	rcu_read_unlock();

	return err;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	const struct xfrm_state_afinfo *afinfo;
	const struct xfrm_mode *inner_mode;
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		bundle[i] = xdst;
		if (!xdst_prev)
			xdst0 = xdst;
		else
			/* Ref count is taken during xfrm_alloc_dst()
			 * No need to do dst_clone() on dst1
			 */
			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = &xfrm[i]->inner_mode;

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			__u32 mark = 0;

			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);

			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
					      &saddr, &daddr, family, mark);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;

		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
		if (likely(afinfo))
			dst1->output = afinfo->output;
		else
			dst1->output = dst_discard_out;
		rcu_read_unlock();

		xdst_prev = xdst;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	xfrm_dst_set_child(xdst_prev, dst);
	xdst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path(xdst0, dst, nfheader_len);
	xfrm_init_pmtu(bundle, nx);

	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);
		if (err)
			goto free_dst;

		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
	}

	return &xdst0->u.dst;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (xdst0)
		dst_release_immediate(&xdst0->u.dst);

	return ERR_PTR(err);
}
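/* Note (added commentary, not in the original): the loop above builds the
 * bundle top-down, outermost xfrm_dst first, while the final walk hands
 * each level the headroom/tailroom still needed beneath it: the top dst
 * advertises the total header_len of all transforms, and each step down
 * subtracts its own props.header_len before moving to the child. */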
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT,
						    pols[0]->if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct xfrm_dst *xdst;
	struct dst_entry *dst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err == 0)
			return NULL;

		if (err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
static void xfrm_policy_queue_process(struct timer_list *t)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(xfrm_dst_path(dst));
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
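/* Illustrative sketch, not part of the original code and excluded from the
 * build: the hold-queue timer starts at XFRM_QUEUE_TMO_MIN (HZ/10, armed
 * in xdst_queue_output() below) and doubles on every expiry while the
 * bundle is still a dummy, until it reaches XFRM_QUEUE_TMO_MAX (60*HZ),
 * at which point the queue is purged:
 */
#if 0
static unsigned long example_next_timeout(unsigned long cur)
{
	if (cur >= XFRM_QUEUE_TMO_MAX)
		return 0;	/* caller purges the hold queue */
	return cur << 1;	/* exponential backoff, as above */
}
#endif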
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms, u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	xfrm_dst_set_child(xdst, dst);
	xdst->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					   const struct flowi *fl,
					   u16 family, u8 dir,
					   struct xfrm_flo *xflo, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	num_pols = 1;
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
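/* Note (added commentary, not in the original): xfrm_bundle_lookup() has
 * four outcomes: NULL (no matching policy, plain routing applies), a real
 * bundle, a dummy DST_XFRM_QUEUE bundle that parks packets while the key
 * manager negotiates states, or ERR_PTR() on hard failure. */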
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	rcu_read_unlock();

	return ret;
}
/* Finds/creates a bundle for given flow and if_id
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;
	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);

			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				if (err == -EREMOTE)
					goto nopol;

				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
		if (xdst == NULL)
			goto nopol;
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
			goto dropdst;
		}

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = ktime_get_real_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup_with_ifid);
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags)
{
	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
}
EXPORT_SYMBOL(xfrm_lookup);
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
				    const struct sock *sk, int flags)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);

	if (PTR_ERR(dst) == -EREMOTE)
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

	if (IS_ERR(dst))
		dst_release(dst_orig);

	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
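/* Illustrative caller pattern, not part of the original code and excluded
 * from the build: xfrm_lookup_route() may hand back a blackhole route for
 * larval SAs, so a caller that obtains a dst here must eventually pass the
 * skb to dst_output(). The function and variable names below are
 * hypothetical:
 */
#if 0
static int example_output_path(struct net *net, struct sock *sk,
			       struct sk_buff *skb, struct dst_entry *rt,
			       const struct flowi *fl)
{
	struct dst_entry *dst;

	dst = xfrm_lookup_route(net, rt, fl, sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_set(skb, dst);
	return dst_output(net, sk, skb);	/* must always be reached */
}
#endif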
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct sec_path *sp = skb_sec_path(skb);
	struct xfrm_state *x;

	if (!sp || idx < 0 || idx >= sp->len)
		return 0;
	x = sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * 0 or more than 0 is returned when validation succeeds (either bypass
 * because of optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */

static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
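/* Note (added commentary, not in the original): for example, with a
 * secpath of one ESP transport state and a single matching template,
 * xfrm_policy_ok() returns idx + 1 == 1; an optional transport-mode
 * template short-circuits to "start"; a non-transport secpath entry that
 * matches nothing yields -2 - idx, letting the caller reject exactly the
 * offending state. */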
static void
decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl;
	u8 *xprth = skb_network_header(skb) + ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;

	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + ihl * 4;
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ehdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;
		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ah_hdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;
		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
}
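/* Note (added commentary, not in the original): with the transport header
 * laid out as { __be16 sport; __be16 dport; }, indexing ports[!!reverse]
 * and ports[!reverse] swaps source and destination when decoding in the
 * reverse direction:
 *
 *   reverse == 0:  sport = ports[0], dport = ports[1]
 *   reverse == 1:  sport = ports[1], dport = ports[0]
 */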
#if IS_ENABLED(CONFIG_IPV6)
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	int onlyproto = 0;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u32 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	while (nh + offset + sizeof(*exthdr) < skb->data ||
	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			onlyproto = 1;
			/* fall through */
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
			break;
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;
		case IPPROTO_ICMPV6:
			if (!onlyproto && (nh + offset + 2 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && (nh + offset + 3 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif
		/* XXX Why are there these headers? */
		case IPPROTO_AH:
		case IPPROTO_ESP:
		case IPPROTO_COMP:
		default:
			fl6->fl6_ipsec_spi = 0;
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
#endif
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
		return -EAFNOSUPPORT;
	}

	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	int xerr_idx = -1;
	const struct xfrm_if_cb *ifcb;
	struct sec_path *sp;
	struct xfrm_if *xi;
	u32 if_id = 0;

	rcu_read_lock();
	ifcb = xfrm_if_get_cb();

	if (ifcb) {
		xi = ifcb->decode_session(skb, family);
		if (xi) {
			if_id = xi->p.if_id;
			net = xi->net;
		}
	}
	rcu_read_unlock();

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	sp = skb_sec_path(skb);
	if (sp) {
		int i;

		for (i = sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = ktime_get_real_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN, if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = ktime_get_real_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		sp = skb_sec_path(skb);
		if (!sp)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);
	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
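/* Illustrative sketch, not part of the original code and excluded from the
 * build: an address family plugs in by passing its dst_ops; any hook left
 * NULL is filled with the xfrm defaults installed above. Loosely modelled
 * on the IPv4 side, but the names here are placeholders, not the real
 * xfrm4 definitions:
 */
#if 0
static struct dst_ops example_dst_ops = {
	.family = AF_INET,
};

static const struct xfrm_policy_afinfo example_afinfo = {
	.dst_ops = &example_dst_ops,
	/* .dst_lookup, .fill_dst, .blackhole_route, ... */
};

static int __init example_register(void)
{
	return xfrm_policy_register_afinfo(&example_afinfo, AF_INET);
}
#endif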
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
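/* Note (added commentary, not in the original): the per-direction "bydst"
 * tables and the by-index table start small (hmask + 1 buckets) and are
 * grown later by the xfrm_hash_resize worker queued via policy_hash_work;
 * the hthresh defaults of /32 and /128 mean "hash on full addresses" until
 * userspace lowers the thresholds and triggers xfrm_hash_rebuild. */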
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}
static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	seqcount_init(&xfrm_policy_hash_generation);
	xfrm_input_init();

#ifdef CONFIG_INET_ESPINTCP
	espintcp_init();
#endif

	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif