 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <asm/uaccess.h>

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

static int xfrm_state_gc_flush_bundles;

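/*
 * Lookup sketch (illustrative, not part of the original file): the input
 * path resolves an SA from the SPI carried in the AH/ESP header via the
 * byspi table, roughly
 *
 *	h = xfrm_spi_hash(daddr, spi, proto, family);
 *	list_for_each_entry(x, xfrm_state_byspi + h, byspi)
 *		...match spi/daddr/proto...
 *
 * while the output path scans xfrm_state_bydst for candidate SAs toward a
 * destination (see xfrm_state_find() below). The hash helpers are assumed
 * to come from net/xfrm.h.
 */
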
int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	if (del_timer(&x->timer))
		BUG();
	if (del_timer(&x->rtimer))
		BUG();
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(void *data)
{
	struct xfrm_state *x;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	if (xfrm_state_gc_flush_bundles) {
		xfrm_state_gc_flush_bundles = 0;
		xfrm_flush_bundles();
	}

	spin_lock_bh(&xfrm_state_gc_lock);
	list_splice_init(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		x = list_entry(entry, struct xfrm_state, bydst);
		xfrm_state_gc_destroy(x);
	}
	wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

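/* Worked example (illustrative): with HZ == 1000, make_jiffies(10) yields
 * 10000 jiffies, while any request of (MAX_SCHEDULE_TIMEOUT-1)/HZ seconds
 * or more is clamped to MAX_SCHEDULE_TIMEOUT-1 so the timer arithmetic
 * below never overflows. */
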
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX &&
	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
		xfrm_state_hold(x);
	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}
	if (!__xfrm_state_delete(x) && x->id.spi)
		km_state_expired(x, 1, 0);

out:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
}

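/*
 * Timing example (illustrative): an SA created at t=0 with
 * lft.soft_add_expires_seconds == 3000 and
 * lft.hard_add_expires_seconds == 3600 triggers this handler near t=3000,
 * marks km.dying and sends a soft expire (km_state_expired(x, 0, 0)) so
 * the key manager can rekey, then re-arms the timer for the remaining
 * 600 seconds; at t=3600 the state is deleted and a hard expire is
 * announced.
 */
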
static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->bydst);
		INIT_LIST_HEAD(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data     = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	list_add(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		list_del(&x->bydst);
		__xfrm_state_put(x);
		if (x->id.spi) {
			list_del(&x->byspi);
			__xfrm_state_put(x);
		}
		spin_unlock(&xfrm_state_lock);
		if (del_timer(&x->timer))
			__xfrm_state_put(x);
		if (del_timer(&x->rtimer))
			__xfrm_state_put(x);

		/* The number two in this test is the reference
		 * mentioned in the comment below plus the reference
		 * our caller holds.  A larger value means that
		 * there are DSTs attached to this xfrm_state.
		 */
		if (atomic_read(&x->refcnt) > 2) {
			xfrm_state_gc_flush_bundles = 1;
			schedule_work(&xfrm_state_gc_work);
		}

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

void xfrm_state_flush(u8 proto)
{
	int i;
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				xfrm_state_delete(x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned h = xfrm_dst_hash(daddr, family);
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	struct xfrm_state_afinfo *afinfo;

	afinfo = xfrm_state_get_afinfo(family);
	if (afinfo == NULL) {
		*err = -EAFNOSUPPORT;
		return NULL;
	}

	spin_lock_bh(&xfrm_state_lock);
	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !xfrm_sec_ctx_match(pol->security, x->security))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    xfrm_sec_ctx_match(pol->security, x->security))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
					       tmpl->id.proto)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			list_add_tail(&x->bydst, xfrm_state_bydst+h);
			xfrm_state_hold(x);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				list_add(&x->byspi, xfrm_state_byspi+h);
				xfrm_state_hold(x);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			xfrm_state_hold(x);
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}

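/*
 * Usage sketch (hypothetical caller, not part of this file): the output
 * path typically resolves each template of a matched policy roughly as
 *
 *	x = xfrm_state_find(daddr, saddr, fl, tmpl, pol, &err, family);
 *	if (!x && err == -EAGAIN)
 *		...sleep on km_waitq until the key manager answers...
 *
 * -EAGAIN here means an ACQ state exists and resolution is in progress.
 */
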
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

	list_add(&x->bydst, xfrm_state_bydst+h);
	xfrm_state_hold(x);

	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

	list_add(&x->byspi, xfrm_state_byspi+h);
	xfrm_state_hold(x);

	if (!mod_timer(&x->timer, jiffies + HZ))
		xfrm_state_hold(x);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		xfrm_state_hold(x);

	wake_up(&km_waitq);
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);

	xfrm_flush_all_bundles();
}
EXPORT_SYMBOL(xfrm_state_insert);

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int family;
	int err;

	family = x->props.family;
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);

	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (!x1)
		x1 = afinfo->find_acq(
			x->props.mode, x->props.reqid, x->id.proto,
			&x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (!err)
		xfrm_flush_all_bundles();

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);

int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int err;

	afinfo = xfrm_state_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		if (!mod_timer(&x1->timer, jiffies + HZ))
			xfrm_state_hold(x1);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		if (!mod_timer(&x->timer, jiffies))
			xfrm_state_hold(x);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

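/* Limit example (illustrative): with lft.soft_byte_limit == 900000 and
 * lft.hard_byte_limit == 1000000, crossing 900000 bytes sets km.dying and
 * sends one soft expire so the key manager can rekey in time; crossing
 * 1000000 moves the state to XFRM_STATE_EXPIRED and lets the timer
 * handler above deliver the hard expire. */
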
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return NULL;

	spin_lock_bh(&xfrm_state_lock);
	x = afinfo->state_lookup(daddr, spi, proto);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return NULL;

	spin_lock_bh(&xfrm_state_lock);
	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;
	struct xfrm_state *x;

	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32
xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

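/* The "(++acqseq ? : ++acqseq)" idiom skips zero on wraparound: when
 * acqseq overflows from 0xffffffff to 0, the second increment yields 1,
 * so a valid acquire sequence number is never 0. */
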
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
	u32 h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		minspi = ntohl(minspi);
		maxspi = ntohl(maxspi);
		for (h=0; h<maxspi-minspi+1; h++) {
			spi = minspi + net_random()%(maxspi-minspi+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		list_add(&x->byspi, xfrm_state_byspi+h);
		xfrm_state_hold(x);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);

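/* Example (illustrative): xfrm_alloc_spi(x, htonl(0x100), htonl(0x1ff))
 * makes up to 256 random probes in [0x100, 0x1ff], checking each candidate
 * against the byspi table and keeping the first unused value; with
 * minspi == maxspi the single requested SPI is taken as-is if free. */
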
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
				count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}

	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
				continue;
			err = func(x, --count, data);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

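/*
 * Usage sketch (hypothetical callback, not part of this file): dumping
 * every ESP state. The walker passes a decreasing index, so the last
 * entry is called with count == 0:
 *
 *	static int dump_one(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		printk(KERN_DEBUG "spi %08x, %d left\n",
 *		       ntohl(x->id.spi), count);
 *		return 0;
 *	}
 *
 *	err = xfrm_state_walk(IPPROTO_ESP, dump_one, NULL);
 */
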
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) {
		xfrm_state_hold(x);
		x->xflags &= ~XFRM_TIME_DEFER;
	}
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
	xfrm_state_put(x);
}

int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

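/*
 * Window example (illustrative): with x->replay.seq == 100 and
 * x->props.replay_window == 32, an arriving seq of 70 gives diff == 30
 * and is tested against bit 30 of the bitmap; seq 60 gives diff == 40,
 * which is outside the window and dropped; seq 101 takes the fast path
 * above and is accepted (the bitmap itself is only updated in
 * xfrm_replay_advance() below).
 */
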
void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);

static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);

/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk->sk_family, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

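/*
 * Registration sketch (hypothetical manager, not part of this file): a
 * key manager fills in only the callbacks it implements and registers
 * itself once at module init:
 *
 *	static struct xfrm_mgr my_mgr = {
 *		.id	 = "my_mgr",
 *		.notify	 = my_state_notify,
 *		.acquire = my_acquire,
 *	};
 *
 *	err = xfrm_register_km(&my_mgr);
 */
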
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		afinfo->state_bydst = xfrm_state_bydst;
		afinfo->state_byspi = xfrm_state_byspi;
		xfrm_state_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			xfrm_state_afinfo[afinfo->family] = NULL;
			afinfo->state_byspi = NULL;
			afinfo->state_bydst = NULL;
		}
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

/*
 * This function is NOT optimal.  For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal.  However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		res -= (m - mtu);
	}

	return res;
}

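/* Sizing example (illustrative): for an ESP state with
 * props.header_len == 24 and mtu == 1500, the loop starts from
 * res = 1476 and asks x->type->get_max_size() for the on-wire size of
 * that payload, shrinking res until the transformed packet fits in 1500
 * bytes; the result is the MTU to advertise to upper layers. */
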
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	err = 0;
	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
	int i;

	for (i=0; i<XFRM_DST_HSIZE; i++) {
		INIT_LIST_HEAD(&xfrm_state_bydst[i]);
		INIT_LIST_HEAD(&xfrm_state_byspi[i]);
	}
	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}