/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"
/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
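
/*
 * Illustrative sketch, not part of the original file: a queueing backend
 * (in practice nfnetlink_queue) registers itself roughly as below. The
 * handler and callback names are hypothetical; only the two callbacks used
 * in this file (outfn, nf_hook_drop) and nf_register_queue_handler() itself
 * are taken from the real API.
 *
 *      static const struct nf_queue_handler example_qh = {
 *              .outfn          = example_outfn,        (deliver the entry to userspace)
 *              .nf_hook_drop   = example_hook_drop,    (flush entries for dying hooks)
 *      };
 *
 *      nf_register_queue_handler(net, &example_qh);
 */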
/* Only one queueing backend exists in the kernel, so registration is
 * unconditional; a second registration is a bug and only triggers the
 * WARN_ON() below. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
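
/*
 * Once the handler pointer is cleared, __nf_queue() below finds no handler
 * and fails with -ESRCH, so packets hitting an NF_QUEUE verdict are dropped
 * unless the verdict carries NF_VERDICT_FLAG_QUEUE_BYPASS; entries already
 * handed to userspace must have been flushed by the backend beforehand, as
 * the comment above requires.
 */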
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->physin)
                dev_put(entry->physin);
        if (entry->physout)
                dev_put(entry->physout);
#endif
}
void nf_queue_entry_free(struct nf_queue_entry *entry)
{
        nf_queue_entry_release_refs(entry);
        kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);
static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        const struct sk_buff *skb = entry->skb;
        struct nf_bridge_info *nf_bridge;

        nf_bridge = nf_bridge_info_get(skb);
        if (nf_bridge) {
                entry->physin = nf_bridge_get_physindev(skb);
                entry->physout = nf_bridge_get_physoutdev(skb);
        } else {
                entry->physin = NULL;
                entry->physout = NULL;
        }
#endif
}
/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->physin)
                dev_hold(entry->physin);
        if (entry->physout)
                dev_hold(entry->physout);
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
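
/*
 * The references taken above are dropped in nf_queue_entry_release_refs()
 * when the entry comes back from the backend and is released via
 * nf_queue_entry_free(), so the devices and socket stay valid for as long
 * as the packet sits in userspace.
 */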
void nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
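
/*
 * The two helpers below stash the routing key of locally generated packets
 * in the extra bytes allocated behind struct nf_queue_entry (route_key_size
 * in __nf_queue(), reached via nf_queue_entry_reroute()). On reinject,
 * nf_reroute() uses the saved addresses and mark to decide whether the
 * packet must be re-routed because its headers were rewritten while queued;
 * that comparison itself lives outside this file.
 */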
static void nf_ip_saveroute(const struct sk_buff *skb,
                            struct nf_queue_entry *entry)
{
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}
static void nf_ip6_saveroute(const struct sk_buff *skb,
                             struct nf_queue_entry *entry)
{
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      unsigned int index, unsigned int queuenum)
{
        struct nf_queue_entry *entry = NULL;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;
        unsigned int route_key_size;
        int status;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh)
                return -ESRCH;

        switch (state->pf) {
        case AF_INET:
                route_key_size = sizeof(struct ip_rt_info);
                break;
        case AF_INET6:
                route_key_size = sizeof(struct ip6_rt_info);
                break;
        default:
                route_key_size = 0;
                break;
        }

        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;

        if (skb_dst(skb) && !skb_dst_force(skb)) {
                kfree(entry);
                return -ENETDOWN;
        }

        *entry = (struct nf_queue_entry) {
                .skb        = skb,
                .state      = *state,
                .hook_index = index,
                .size       = sizeof(*entry) + route_key_size,
        };

        __nf_queue_entry_init_physdevs(entry);

        nf_queue_entry_get_refs(entry);

        switch (entry->state.pf) {
        case AF_INET:
                nf_ip_saveroute(skb, entry);
                break;
        case AF_INET6:
                nf_ip6_saveroute(skb, entry);
                break;
        }

        status = qh->outfn(entry, queuenum);
        if (status < 0) {
                nf_queue_entry_free(entry);
                return status;
        }

        return 0;
}
/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             unsigned int index, unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
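
/*
 * Illustrative sketch, not part of the original file: a hook requests
 * queueing by returning an NF_QUEUE verdict with the target queue number in
 * the upper bits, which nf_queue() recovers via verdict >> NF_VERDICT_QBITS.
 * The hook below is hypothetical; NF_QUEUE_NR() and
 * NF_VERDICT_FLAG_QUEUE_BYPASS come from <linux/netfilter.h>.
 *
 *      static unsigned int example_hook(void *priv, struct sk_buff *skb,
 *                                       const struct nf_hook_state *state)
 *      {
 *              return NF_QUEUE_NR(0);
 *      }
 *
 * OR-ing NF_VERDICT_FLAG_QUEUE_BYPASS into the verdict makes nf_queue()
 * return 1 (continue hook traversal) instead of dropping the skb when no
 * backend is registered.
 */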
static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        *index = i;
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}
static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
        switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        case NFPROTO_BRIDGE:
                return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
        case NFPROTO_IPV4:
                return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
        case NFPROTO_IPV6:
                return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
        default:
                WARN_ON_ONCE(1);
                return NULL;
        }

        return NULL;
}
/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = nf_hook_entries_head(net, pf, entry->state.hook);

        i = entry->hook_index;
        if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                nf_queue_entry_free(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                if (nf_reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);
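
/*
 * Illustrative sketch, not part of the original file: when userspace
 * delivers its verdict, the queueing backend hands the entry back through
 * nf_reinject(), e.g.
 *
 *      nf_reinject(entry, NF_ACCEPT);  resume traversal at the next hook
 *      nf_reinject(entry, NF_DROP);    free the skb
 *
 * In every case the entry itself is released above via nf_queue_entry_free().
 */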