/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>

ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
EXPORT_SYMBOL_GPL(nf_conntrack_chain);

ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain);
EXPORT_SYMBOL_GPL(nf_ct_expect_chain);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
static inline void
__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
{
	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
	    && ecache->events) {
		struct nf_ct_event item = {
			.ct	= ecache->ct,
			.pid	= 0,
			.report	= 0
		};

		atomic_notifier_call_chain(&nf_conntrack_chain,
					   ecache->events,
					   &item);
	}

	ecache->events = 0;
	nf_ct_put(ecache->ct);
	ecache->ct = NULL;
}

/* Deliver all cached events for a particular conntrack. This is called
 * by code prior to async packet handling for freeing the skb */
void nf_ct_deliver_cached_events(const struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *ecache;

	local_bh_disable();
	ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
	if (ecache->ct == ct)
		__nf_ct_deliver_cached_events(ecache);
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
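
/*
 * Usage sketch (illustrative, not part of this file): event bits are
 * accumulated during packet processing via the nf_conntrack_event_cache()
 * helper from <net/netfilter/nf_conntrack_ecache.h> and then delivered in
 * one batch, roughly:
 *
 *	// while handling the packet, assuming IPCT_STATUS is the bit to report
 *	nf_conntrack_event_cache(IPCT_STATUS, ct);
 *	...
 *	// once packet processing is done
 *	nf_ct_deliver_cached_events(ct);
 *
 * The exact helper signature varies between kernel versions; treat this as
 * an assumption, not a definitive call sequence.
 */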

/* Deliver cached events for old pending events, if current conntrack != old */
void __nf_ct_event_cache_init(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *ecache;

	/* take care of delivering potentially old events */
	ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
	BUG_ON(ecache->ct == ct);
	if (ecache->ct)
		__nf_ct_deliver_cached_events(ecache);
	/* initialize for this conntrack/packet */
	ecache->ct = ct;
	nf_conntrack_get(&ct->ct_general);
}
EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);

/* flush the event cache - touches other CPU's data and must not be called
 * while packets are still passing through the code */
void nf_ct_event_cache_flush(struct net *net)
{
	struct nf_conntrack_ecache *ecache;
	int cpu;

	for_each_possible_cpu(cpu) {
		ecache = per_cpu_ptr(net->ct.ecache, cpu);
		if (ecache->ct)
			nf_ct_put(ecache->ct);
	}
}

int nf_conntrack_ecache_init(struct net *net)
{
	net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache);
	if (!net->ct.ecache)
		return -ENOMEM;
	return 0;
}

void nf_conntrack_ecache_fini(struct net *net)
{
	free_percpu(net->ct.ecache);
}
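
/*
 * Usage sketch (illustrative, not part of this file): the init/fini pair is
 * expected to be called from the per-netns conntrack setup and teardown
 * paths, along the lines of:
 *
 *	err = nf_conntrack_ecache_init(net);
 *	if (err < 0)
 *		goto out_cleanup;
 *	...
 *	nf_conntrack_ecache_fini(net);
 *
 * The surrounding caller and its error label are assumptions used only for
 * illustration.
 */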

int nf_conntrack_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

int nf_conntrack_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
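
/*
 * Usage sketch (illustrative, not part of this file): a consumer such as
 * ctnetlink hooks into the chain with a notifier_block whose callback gets
 * the cached event mask and a struct nf_ct_event. The callback name, the
 * hypothetical handle_destroy() helper and the IPCT_* bit used below are
 * assumptions for illustration only:
 *
 *	static int my_conntrack_event(struct notifier_block *this,
 *				      unsigned long events, void *ptr)
 *	{
 *		struct nf_ct_event *item = ptr;
 *
 *		if (events & IPCT_DESTROY)
 *			handle_destroy(item->ct);	// hypothetical handler
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_conntrack_notifier = {
 *		.notifier_call	= my_conntrack_event,
 *	};
 *
 *	// module init / exit
 *	nf_conntrack_register_notifier(&my_conntrack_notifier);
 *	nf_conntrack_unregister_notifier(&my_conntrack_notifier);
 */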

int nf_ct_expect_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&nf_ct_expect_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

int nf_ct_expect_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nf_ct_expect_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);