/*
 * xfrm_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
DECLARE_MUTEX(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_cfg_sem);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
	else
		err = -EEXIST;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
		err = -ENOENT;
	else
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type_map *typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	read_lock(&typemap->lock);
	type = typemap->map[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	read_unlock(&typemap->lock);
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);
void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xp->index & 7;

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1);
	xfrm_pol_put(xp);
}
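/*
 * Note on the timer above: a soft expiry only notifies the key manager
 * (km_policy_expired(..., 0)) and re-arms the timer with XFRM_KM_TIMEOUT,
 * while a hard expiry unlinks and kills the policy (hard flag set to 1).
 * "Use" lifetimes are measured from curlft.use_time when set, otherwise
 * from curlft.add_time.
 */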
/* Allocate xfrm_policy. Not used here, it is supposed to be used by
 * pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(int gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
/* Destroy xfrm_policy: descendant resources must be released by this moment. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}
static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}
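/*
 * Dead policies are queued on xfrm_policy_gc_list and reaped here from
 * process context. xfrm_policy_kill() may run in contexts where freeing
 * the cached bundles directly would be unsafe, so the actual teardown is
 * deferred to this work item.
 */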
/* The rule must already be unlinked from the lists: release descendant
 * resources and announce the entry dead.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
/* Generate a new index... KAME seems to generate them ordered by cost
 * at the price of completely unpredictable ordering of rules. That will
 * not do here.
 */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol = *p) != NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
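/*
 * Illustrative sketch, not part of this file: roughly how a key manager
 * such as pfkeyv2 might allocate and install a policy. The fields shown
 * are a minimal, hypothetical subset; a real caller also fills in the
 * selector, family and template vector before insertion:
 *
 *	struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
 *
 *	if (xp == NULL)
 *		return -ENOMEM;
 *	xp->action = XFRM_POLICY_ALLOW;
 *	xp->lft.hard_add_expires_seconds = 3600;
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, xp, 1);
 */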
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
				      int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol = *p) != NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel);
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[id & 7]; (pol = *p) != NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
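/*
 * Illustrative sketch, not part of this file: a hypothetical callback
 * for xfrm_policy_walk(). The walker passes each policy, its direction
 * and a decrementing count; a non-zero return value aborts the walk.
 *
 *	static int example_count_one(struct xfrm_policy *xp, int dir,
 *				     int count, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int total = 0;
 *	int err = xfrm_policy_walk(example_count_one, &total);
 */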
/* Find the policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);
		if (match) {
			xfrm_pol_hold(pol);
			break;
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		if (match)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol)
		xfrm_policy_kill(old_pol);
	return 0;
}
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
	}
	return newp;
}
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}
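/*
 * __xfrm_sk_clone_policy() runs when a socket carrying per-socket
 * policies is cloned (for example a TCP child socket created at
 * accept() time), so the child receives its own policy copies instead
 * of sharing the parent's.
 */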
/* Resolve the list of templates for the flow, given the policy. */

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
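/*
 * Note on the resolver above: templates are walked in order; for a
 * tunnel-mode template (tmpl->mode set) the state lookup targets the
 * tunnel endpoints (tmpl->id.daddr/tmpl->saddr) rather than the flow's
 * own addresses, and those endpoints then become the outer addresses
 * for the remaining templates.
 */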
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}
/* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static int stale_bundle(struct dst_entry *dst);
/* Main function: finds/creates a bundle for the given flow.
 *
 * At the moment we eat a raw IP route, mostly to speed up lookups
 * on interfaces with IPsec disabled.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family = dst_orig->ops->family;
restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find a matching bundle.
		 *
		 * LATER: help from the flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx < 0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i = 0; i < nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist a new bundle to a dead
			 * object. We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);

			xfrm_pol_put(policy);
			if (dst)
				dst_free(dst);
			goto restart;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
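/*
 * Note on the flags argument above: when non-zero, an -EAGAIN from
 * template resolution parks the caller on km_waitq so a key manager can
 * negotiate the missing states; a pending signal aborts the wait with
 * -ERESTART, and a flow-cache generation change forces a full restart
 * of the lookup.
 */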
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1 << x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
			return ++idx;
		if (sp->x[idx].xvec->props.mode)
			break;
	}
	return start;
}
static int
_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);

	return 0;
}
static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->x[k].xvec->props.mode)
			return 1;
	}

	return 0;
}
*sk
, int dir
, struct sk_buff
*skb
,
933 unsigned short family
)
935 struct xfrm_policy
*pol
;
938 if (_decode_session(skb
, &fl
, family
) < 0)
941 /* First, check used SA against their selectors. */
945 for (i
=skb
->sp
->len
-1; i
>=0; i
--) {
946 struct sec_decap_state
*xvec
= &(skb
->sp
->x
[i
]);
947 if (!xfrm_selector_match(&xvec
->xvec
->sel
, &fl
, family
))
950 /* If there is a post_input processor, try running it */
951 if (xvec
->xvec
->type
->post_input
&&
952 (xvec
->xvec
->type
->post_input
)(xvec
->xvec
,
960 if (sk
&& sk
->sk_policy
[dir
])
961 pol
= xfrm_sk_policy_lookup(sk
, dir
, &fl
);
964 pol
= flow_cache_lookup(&fl
, family
,
965 policy_to_flow_dir(dir
),
969 return !skb
->sp
|| !secpath_has_tunnel(skb
->sp
, 0);
971 pol
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
973 if (pol
->action
== XFRM_POLICY_ALLOW
) {
975 static struct sec_path dummy
;
978 if ((sp
= skb
->sp
) == NULL
)
981 /* For each tunnel xfrm, find the first matching tmpl.
982 * For each tmpl before that, find corresponding xfrm.
983 * Order is _important_. Later we will implement
984 * some barriers, but at the moment barriers
985 * are implied between each two transformations.
987 for (i
= pol
->xfrm_nr
-1, k
= 0; i
>= 0; i
--) {
988 k
= xfrm_policy_ok(pol
->xfrm_vec
+i
, sp
, k
, family
);
993 if (secpath_has_tunnel(sp
, k
))
1004 EXPORT_SYMBOL(__xfrm_policy_check
);
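/*
 * Illustrative note, not part of this file: protocol input paths do not
 * call this directly but through thin inline wrappers in net/xfrm.h
 * (e.g. xfrm_policy_check()/xfrm4_policy_check()), which short-circuit
 * the check when no policies are installed and no sec_path is present.
 */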
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	if (!stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure.
	 */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i = 0; i < 2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst = *dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);
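/*
 * Each xfrm_dst in the bundle caches the MTU of its child and of its
 * route: the effective MTU at every level is the child path MTU reduced
 * by the transform overhead (xfrm_state_mtu()) and clamped to the route
 * MTU. xfrm_bundle_ok() below revalidates these caches on each use.
 */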
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
/* Well... that's _TASK_. We need to scan through the transformation
 * list and figure out what MSS TCP should generate so that the final
 * datagram fits the MTU. Mama mia... :-)
 *
 * Apparently, some easy way exists, but we used to choose the most
 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
 *
 * Consider this function as something like dark humour. :-)
 */
static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
{
	int res = mtu - dst->header_len;

	for (;;) {
		struct dst_entry *d = dst;
		int m = res;

		do {
			struct xfrm_state *x = d->xfrm;
			if (x) {
				spin_lock_bh(&x->lock);
				if (x->km.state == XFRM_STATE_VALID &&
				    x->type && x->type->get_max_size)
					m = x->type->get_max_size(d->xfrm, m);
				else
					m += x->props.header_len;
				spin_unlock_bh(&x->lock);
			}
		} while ((d = d->child) != NULL);

		if (m <= mtu)
			break;
		res -= (m - mtu);
		if (res < 88)
			return mtu;
	}

	return res + dst->header_len;
}
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->get_mss == NULL))
			dst_ops->get_mss = xfrm_get_mss;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
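/*
 * Illustrative sketch, not part of this file: the shape of the
 * per-family glue registered here, loosely modeled on the IPv4 side.
 * The example_* handler names are hypothetical; the family module
 * supplies its own dst_ops and lookup/bundle/decode callbacks:
 *
 *	static struct xfrm_policy_afinfo example_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &example_dst_ops,
 *		.dst_lookup	= example_dst_lookup,
 *		.find_bundle	= example_find_bundle,
 *		.bundle_create	= example_bundle_create,
 *		.decode_session	= example_decode_session,
 *	};
 *
 *	err = xfrm_policy_register_afinfo(&example_policy_afinfo);
 */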
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			dst_ops->get_mss = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};
static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}