/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
6 #include <linux/kernel.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/proc_fs.h>
11 #include <linux/skbuff.h>
12 #include <linux/netfilter.h>
13 #include <linux/netfilter_ipv4.h>
14 #include <linux/netfilter_ipv6.h>
15 #include <linux/netfilter_bridge.h>
16 #include <linux/seq_file.h>
17 #include <linux/rcupdate.h>
18 #include <net/protocol.h>
19 #include <net/netfilter/nf_queue.h>
22 #include "nf_internals.h"
/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
32 /* return EBUSY when somebody else is registered, return EEXIST if the
33 * same handler is registered, return 0 in case of success. */
34 void nf_register_queue_handler(struct net
*net
, const struct nf_queue_handler
*qh
)
36 /* should never happen, we only have one queueing backend in kernel */
37 WARN_ON(rcu_access_pointer(net
->nf
.queue_handler
));
38 rcu_assign_pointer(net
->nf
.queue_handler
, qh
);
40 EXPORT_SYMBOL(nf_register_queue_handler
);
42 /* The caller must flush their queue before this */
43 void nf_unregister_queue_handler(struct net
*net
)
45 RCU_INIT_POINTER(net
->nf
.queue_handler
, NULL
);
47 EXPORT_SYMBOL(nf_unregister_queue_handler
);
49 static void nf_queue_entry_release_br_nf_refs(struct sk_buff
*skb
)
51 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
52 struct nf_bridge_info
*nf_bridge
= nf_bridge_info_get(skb
);
55 struct net_device
*physdev
;
57 physdev
= nf_bridge_get_physindev(skb
);
60 physdev
= nf_bridge_get_physoutdev(skb
);
67 void nf_queue_entry_release_refs(struct nf_queue_entry
*entry
)
69 struct nf_hook_state
*state
= &entry
->state
;
71 /* Release those devices we held, or Alexey will kill me. */
79 nf_queue_entry_release_br_nf_refs(entry
->skb
);
81 EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs
);
83 static void nf_queue_entry_get_br_nf_refs(struct sk_buff
*skb
)
85 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
86 struct nf_bridge_info
*nf_bridge
= nf_bridge_info_get(skb
);
89 struct net_device
*physdev
;
91 physdev
= nf_bridge_get_physindev(skb
);
94 physdev
= nf_bridge_get_physoutdev(skb
);
101 /* Bump dev refs so they don't vanish while packet is out */
102 void nf_queue_entry_get_refs(struct nf_queue_entry
*entry
)
104 struct nf_hook_state
*state
= &entry
->state
;
109 dev_hold(state
->out
);
111 sock_hold(state
->sk
);
113 nf_queue_entry_get_br_nf_refs(entry
->skb
);
115 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs
);
117 void nf_queue_nf_hook_drop(struct net
*net
)
119 const struct nf_queue_handler
*qh
;
122 qh
= rcu_dereference(net
->nf
.queue_handler
);
124 qh
->nf_hook_drop(net
);
127 EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop
);
129 static void nf_ip_saveroute(const struct sk_buff
*skb
,
130 struct nf_queue_entry
*entry
)
132 struct ip_rt_info
*rt_info
= nf_queue_entry_reroute(entry
);
134 if (entry
->state
.hook
== NF_INET_LOCAL_OUT
) {
135 const struct iphdr
*iph
= ip_hdr(skb
);
137 rt_info
->tos
= iph
->tos
;
138 rt_info
->daddr
= iph
->daddr
;
139 rt_info
->saddr
= iph
->saddr
;
140 rt_info
->mark
= skb
->mark
;
144 static void nf_ip6_saveroute(const struct sk_buff
*skb
,
145 struct nf_queue_entry
*entry
)
147 struct ip6_rt_info
*rt_info
= nf_queue_entry_reroute(entry
);
149 if (entry
->state
.hook
== NF_INET_LOCAL_OUT
) {
150 const struct ipv6hdr
*iph
= ipv6_hdr(skb
);
152 rt_info
->daddr
= iph
->daddr
;
153 rt_info
->saddr
= iph
->saddr
;
154 rt_info
->mark
= skb
->mark
;
158 static int __nf_queue(struct sk_buff
*skb
, const struct nf_hook_state
*state
,
159 const struct nf_hook_entries
*entries
,
160 unsigned int index
, unsigned int queuenum
)
162 int status
= -ENOENT
;
163 struct nf_queue_entry
*entry
= NULL
;
164 const struct nf_queue_handler
*qh
;
165 struct net
*net
= state
->net
;
166 unsigned int route_key_size
;
168 /* QUEUE == DROP if no one is waiting, to be safe. */
169 qh
= rcu_dereference(net
->nf
.queue_handler
);
177 route_key_size
= sizeof(struct ip_rt_info
);
180 route_key_size
= sizeof(struct ip6_rt_info
);
187 entry
= kmalloc(sizeof(*entry
) + route_key_size
, GFP_ATOMIC
);
193 *entry
= (struct nf_queue_entry
) {
197 .size
= sizeof(*entry
) + route_key_size
,
200 nf_queue_entry_get_refs(entry
);
203 switch (entry
->state
.pf
) {
205 nf_ip_saveroute(skb
, entry
);
208 nf_ip6_saveroute(skb
, entry
);
212 status
= qh
->outfn(entry
, queuenum
);
215 nf_queue_entry_release_refs(entry
);
226 /* Packets leaving via this function must come back through nf_reinject(). */
227 int nf_queue(struct sk_buff
*skb
, struct nf_hook_state
*state
,
228 const struct nf_hook_entries
*entries
, unsigned int index
,
229 unsigned int verdict
)
233 ret
= __nf_queue(skb
, state
, entries
, index
, verdict
>> NF_VERDICT_QBITS
);
236 (verdict
& NF_VERDICT_FLAG_QUEUE_BYPASS
))
243 EXPORT_SYMBOL_GPL(nf_queue
);
245 static unsigned int nf_iterate(struct sk_buff
*skb
,
246 struct nf_hook_state
*state
,
247 const struct nf_hook_entries
*hooks
,
250 const struct nf_hook_entry
*hook
;
251 unsigned int verdict
, i
= *index
;
253 while (i
< hooks
->num_hook_entries
) {
254 hook
= &hooks
->hooks
[i
];
256 verdict
= nf_hook_entry_hookfn(hook
, skb
, state
);
257 if (verdict
!= NF_ACCEPT
) {
258 if (verdict
!= NF_REPEAT
)
269 static struct nf_hook_entries
*nf_hook_entries_head(const struct net
*net
, u8 pf
, u8 hooknum
)
272 #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
274 return rcu_dereference(net
->nf
.hooks_bridge
[hooknum
]);
277 return rcu_dereference(net
->nf
.hooks_ipv4
[hooknum
]);
279 return rcu_dereference(net
->nf
.hooks_ipv6
[hooknum
]);
288 /* Caller must hold rcu read-side lock */
289 void nf_reinject(struct nf_queue_entry
*entry
, unsigned int verdict
)
291 const struct nf_hook_entry
*hook_entry
;
292 const struct nf_hook_entries
*hooks
;
293 struct sk_buff
*skb
= entry
->skb
;
294 const struct net
*net
;
299 net
= entry
->state
.net
;
300 pf
= entry
->state
.pf
;
302 hooks
= nf_hook_entries_head(net
, pf
, entry
->state
.hook
);
304 nf_queue_entry_release_refs(entry
);
306 i
= entry
->hook_index
;
307 if (WARN_ON_ONCE(!hooks
|| i
>= hooks
->num_hook_entries
)) {
313 hook_entry
= &hooks
->hooks
[i
];
315 /* Continue traversal iff userspace said ok... */
316 if (verdict
== NF_REPEAT
)
317 verdict
= nf_hook_entry_hookfn(hook_entry
, skb
, &entry
->state
);
319 if (verdict
== NF_ACCEPT
) {
320 if (nf_reroute(skb
, entry
) < 0)
324 if (verdict
== NF_ACCEPT
) {
327 verdict
= nf_iterate(skb
, &entry
->state
, hooks
, &i
);
330 switch (verdict
& NF_VERDICT_MASK
) {
334 entry
->state
.okfn(entry
->state
.net
, entry
->state
.sk
, skb
);
338 err
= nf_queue(skb
, &entry
->state
, hooks
, i
, verdict
);
350 EXPORT_SYMBOL(nf_reinject
);