/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

static const struct nf_queue_handler __rcu *nf_queue_handler;

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(nf_queue_handler));
	rcu_assign_pointer(nf_queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(nf_queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
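
/*
 * Drop the socket reference pinned by nf_queue_entry_get_refs();
 * sock_gen_put() also copes with request and timewait sockets when
 * CONFIG_INET is enabled.
 */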
static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
	sock_gen_put(sk);
#else
	sock_put(sk);
#endif
}
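
/* Undo nf_queue_entry_get_refs(): release device and socket references. */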
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	dev_put(state->in);
	dev_put(state->out);
	if (state->sk)
		nf_queue_sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_put(entry->physin);
	dev_put(entry->physout);
#endif
}
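
/* Release a queue entry: drop every reference it holds, then free it. */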
void nf_queue_entry_free(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);
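
/*
 * Cache the bridge ports (physin/physout) in the entry so the queueing
 * backend can report them to userspace; a no-op unless bridge netfilter
 * is built in.
 */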
static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	const struct sk_buff *skb = entry->skb;

	if (nf_bridge_info_exists(skb)) {
		entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
		entry->physout = nf_bridge_get_physoutdev(skb);
	} else {
		entry->physin = NULL;
		entry->physout = NULL;
	}
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
		return false;

	dev_hold(state->in);
	dev_hold(state->out);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_hold(entry->physin);
	dev_hold(entry->physout);
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
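
/*
 * Called when hooks are being torn down: tell the queueing backend to
 * drop all packets it still holds for this network namespace.
 */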
void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(nf_queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
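
/*
 * For locally generated IPv4 packets, remember the routing key (TOS,
 * addresses, mark) so the packet can be re-routed after the verdict if
 * the queue handler rewrote any of these fields.
 */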
static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}
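
/* IPv6 counterpart: save addresses and mark for possible re-routing. */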
static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}
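
/*
 * Build a queue entry for this packet and hand it to the registered
 * handler (nfnetlink_queue). Returns 0 on success or a negative errno;
 * -ESRCH means no handler is registered.
 */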
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	unsigned int route_key_size;
	int status;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(nf_queue_handler);
	if (!qh)
		return -ESRCH;

	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}

	if (skb_sk_is_prefetched(skb)) {
		struct sock *sk = skb->sk;

		if (!sk_is_refcounted(sk)) {
			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				return -ENOTCONN;

			/* drop refcount on skb_orphan */
			skb->destructor = sock_edemux;
		}
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	if (skb_dst(skb) && !skb_dst_force(skb)) {
		kfree(entry);
		return -ENETDOWN;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook_index = index,
		.size	= sizeof(*entry) + route_key_size,
	};

	__nf_queue_entry_init_physdevs(entry);

	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;
	}

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);
	if (status < 0) {
		nf_queue_entry_free(entry);
		return status;
	}

	return 0;
}

/* Packets leaving via this function must come back through nf_reinject(). */
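/* Returns 1 if the caller should continue hook traversal (no handler is
 * registered and the rule allows queue bypass), 0 otherwise; on any other
 * error the skb has already been freed.
 */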
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);