/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
6 #include <linux/kernel.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/proc_fs.h>
11 #include <linux/skbuff.h>
12 #include <linux/netfilter.h>
13 #include <linux/netfilter_bridge.h>
14 #include <linux/seq_file.h>
15 #include <linux/rcupdate.h>
16 #include <net/protocol.h>
17 #include <net/netfilter/nf_queue.h>
20 #include "nf_internals.h"
/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
30 /* return EBUSY when somebody else is registered, return EEXIST if the
31 * same handler is registered, return 0 in case of success. */
32 void nf_register_queue_handler(struct net
*net
, const struct nf_queue_handler
*qh
)
34 /* should never happen, we only have one queueing backend in kernel */
35 WARN_ON(rcu_access_pointer(net
->nf
.queue_handler
));
36 rcu_assign_pointer(net
->nf
.queue_handler
, qh
);
38 EXPORT_SYMBOL(nf_register_queue_handler
);
40 /* The caller must flush their queue before this */
41 void nf_unregister_queue_handler(struct net
*net
)
43 RCU_INIT_POINTER(net
->nf
.queue_handler
, NULL
);
45 EXPORT_SYMBOL(nf_unregister_queue_handler
);
/*
 * Drop the device references that were taken when @entry was queued
 * (the counterpart of nf_queue_entry_get_refs() below).
 * NOTE(review): several original lines (52-57, 61, 63-64, 66-70) are
 * missing from this extraction — the dev_put()-style release calls and
 * closing braces are not visible; compare against the upstream file.
 */
47 void nf_queue_entry_release_refs(struct nf_queue_entry
*entry
)
/* Shorthand for the hook state captured when the packet was queued. */
49 struct nf_hook_state
*state
= &entry
->state
;
51 /* Release those devices we held, or Alexey will kill me. */
/* With bridge netfilter, the physical in/out devices were pinned too. */
58 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
59 if (entry
->skb
->nf_bridge
) {
60 struct net_device
*physdev
;
/* Physical ingress device recorded on the skb (may be NULL). */
62 physdev
= nf_bridge_get_physindev(entry
->skb
);
/* Physical egress device recorded on the skb (may be NULL). */
65 physdev
= nf_bridge_get_physoutdev(entry
->skb
);
71 EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs
);
73 /* Bump dev refs so they don't vanish while packet is out */
/*
 * NOTE(review): the original lines 77-83 and 87, 89-90, 92-96 — the
 * dev_hold()-style calls on state->in/out and the closing braces — are
 * missing from this extraction; confirm against the upstream file.
 */
74 void nf_queue_entry_get_refs(struct nf_queue_entry
*entry
)
/* Shorthand for the hook state captured for this queued packet. */
76 struct nf_hook_state
*state
= &entry
->state
;
/* With bridge netfilter, also pin the physical in/out devices. */
84 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
85 if (entry
->skb
->nf_bridge
) {
86 struct net_device
*physdev
;
/* Physical ingress device recorded on the skb (may be NULL). */
88 physdev
= nf_bridge_get_physindev(entry
->skb
);
/* Physical egress device recorded on the skb (may be NULL). */
91 physdev
= nf_bridge_get_physoutdev(entry
->skb
);
97 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs
);
/*
 * Ask the registered queue handler to drop everything it has queued for
 * @net via its ->nf_hook_drop() callback; @count stays 0 when no handler
 * is registered and is presumably the return value.
 * NOTE(review): original lines 100, 103-104, 106 and 108-111 are missing
 * here — the braces, the rcu_read_lock()/unlock pair the
 * rcu_dereference() below implies, the NULL check on @qh, and the final
 * return; confirm against the upstream file.
 */
99 unsigned int nf_queue_nf_hook_drop(struct net
*net
)
101 const struct nf_queue_handler
*qh
;
102 unsigned int count
= 0;
/* Fetch the (single) per-netns queue handler under RCU. */
105 qh
= rcu_dereference(net
->nf
.queue_handler
);
/* Delegate the flush to the backend; it reports how many it dropped. */
107 count
= qh
->nf_hook_drop(net
);
112 EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop
);
/*
 * Package @skb plus its hook state into a freshly allocated
 * nf_queue_entry and hand it to the registered queue handler
 * (queue number @queuenum) via ->outfn().
 * NOTE(review): many original lines (117, 123, 126-130, 132-134,
 * 136-140, 142-144, 146-147, 149, 152-153, 155-164) are missing from
 * this extraction — the NULL checks, most of the designated-initializer
 * fields, the error unwind and the return statement are not visible;
 * confirm against the upstream file before relying on this copy.
 */
114 static int __nf_queue(struct sk_buff
*skb
, const struct nf_hook_state
*state
,
115 const struct nf_hook_entries
*entries
,
116 unsigned int index
, unsigned int queuenum
)
/* Default to -ENOENT, i.e. "no handler/afinfo available". */
118 int status
= -ENOENT
;
119 struct nf_queue_entry
*entry
= NULL
;
120 const struct nf_afinfo
*afinfo
;
121 const struct nf_queue_handler
*qh
;
122 struct net
*net
= state
->net
;
124 /* QUEUE == DROP if no one is waiting, to be safe. */
125 qh
= rcu_dereference(net
->nf
.queue_handler
);
/* Per-family helpers (route save/restore) for state->pf. */
131 afinfo
= nf_get_afinfo(state
->pf
);
/* Allocate the entry with trailing room for the af-specific route key. */
135 entry
= kmalloc(sizeof(*entry
) + afinfo
->route_key_size
, GFP_ATOMIC
);
/* Initialize the entry; other fields of the initializer are missing
 * from this extraction. */
141 *entry
= (struct nf_queue_entry
) {
145 .size
= sizeof(*entry
) + afinfo
->route_key_size
,
/* Pin devices so they survive while the packet sits in userspace. */
148 nf_queue_entry_get_refs(entry
);
/* Stash routing info so it can be restored on reinjection. */
150 afinfo
->saveroute(skb
, entry
);
/* Hand off to the queue backend. */
151 status
= qh
->outfn(entry
, queuenum
);
/* Presumably the failure path: undo the refcounts taken above. */
154 nf_queue_entry_release_refs(entry
);
165 /* Packets leaving via this function must come back through nf_reinject(). */
/*
 * Thin wrapper around __nf_queue(): extracts the queue number from the
 * upper bits of @verdict (>> NF_VERDICT_QBITS). The visible fragment
 * also tests NF_VERDICT_FLAG_QUEUE_BYPASS — presumably to let the packet
 * continue when no handler is listening; the surrounding original lines
 * (169-171, 173-174 and everything after 175, including the declaration
 * of `ret` and the return) are missing from this extraction.
 */
166 int nf_queue(struct sk_buff
*skb
, struct nf_hook_state
*state
,
167 const struct nf_hook_entries
*entries
, unsigned int index
,
168 unsigned int verdict
)
172 ret
= __nf_queue(skb
, state
, entries
, index
, verdict
>> NF_VERDICT_QBITS
)
;
175 (verdict
& NF_VERDICT_FLAG_QUEUE_BYPASS
))
/*
 * Walk the hook entries in @hooks starting at *index, invoking each
 * hookfn on @skb until one returns something other than NF_ACCEPT.
 * NF_REPEAT re-runs the same hook (the increment/loop tail is not
 * visible here).
 * NOTE(review): original lines 186-187 (including the `index` parameter
 * declaration this body dereferences), 190, 193 and everything from 197
 * onward (the loop tail, *index update and return) are missing from this
 * extraction; confirm against the upstream file.
 */
183 static unsigned int nf_iterate(struct sk_buff
*skb
,
184 struct nf_hook_state
*state
,
185 const struct nf_hook_entries
*hooks
,
188 const struct nf_hook_entry
*hook
;
/* Resume iteration from the caller-supplied position *index. */
189 unsigned int verdict
, i
= *index
;
191 while (i
< hooks
->num_hook_entries
) {
192 hook
= &hooks
->hooks
[i
];
/* Run this hook's function on the packet. */
194 verdict
= nf_hook_entry_hookfn(hook
, skb
, state
);
/* Anything but NF_ACCEPT stops the walk (NF_REPEAT re-runs the hook). */
195 if (verdict
!= NF_ACCEPT
) {
196 if (verdict
!= NF_REPEAT
)
207 /* Caller must hold rcu read-side lock */
/*
 * Re-inject a packet previously handed to userspace by nf_queue(),
 * applying the userspace @verdict: re-run the originating hook for
 * NF_REPEAT, reroute + continue traversal for NF_ACCEPT, and re-queue
 * via nf_queue() where the visible tail suggests an NF_QUEUE verdict.
 * NOTE(review): many original lines are missing from this extraction —
 * among them the declarations of `pf`, `i` and `err` that this body
 * uses, the early-exit paths, the remaining switch cases and the kfree
 * of @entry; confirm against the upstream file before relying on this.
 */
208 void nf_reinject(struct nf_queue_entry
*entry
, unsigned int verdict
)
210 const struct nf_hook_entry
*hook_entry
;
211 const struct nf_hook_entries
*hooks
;
212 struct sk_buff
*skb
= entry
->skb
;
213 const struct nf_afinfo
*afinfo
;
214 const struct net
*net
;
/* Recover the namespace and protocol family the packet was queued from. */
219 net
= entry
->state
.net
;
220 pf
= entry
->state
.pf
;
/* Look up the current hook chain for that family/hook (RCU-protected). */
222 hooks
= rcu_dereference(net
->nf
.hooks
[pf
][entry
->state
.hook
]);
/* Drop the device refs pinned while the packet was in userspace. */
224 nf_queue_entry_release_refs(entry
);
/* Position of the hook that queued the packet. */
226 i
= entry
->hook_index
;
/* Chain may have shrunk while the packet was out — bail if out of range. */
227 if (WARN_ON_ONCE(i
>= hooks
->num_hook_entries
)) {
233 hook_entry
= &hooks
->hooks
[i
];
235 /* Continue traversal iff userspace said ok... */
/* NF_REPEAT: re-run the hook that originally queued the packet. */
236 if (verdict
== NF_REPEAT
)
237 verdict
= nf_hook_entry_hookfn(hook_entry
, skb
, &entry
->state
);
/* On accept, restore the route saved at queue time before continuing. */
239 if (verdict
== NF_ACCEPT
) {
240 afinfo
= nf_get_afinfo(entry
->state
.pf
);
241 if (!afinfo
|| afinfo
->reroute(entry
->state
.net
, skb
, entry
) < 0)
/* Still accepted: resume hook traversal after the queuing hook. */
245 if (verdict
== NF_ACCEPT
) {
248 verdict
= nf_iterate(skb
, &entry
->state
, hooks
, &i
);
/* Dispatch on the final verdict's base code. */
251 switch (verdict
& NF_VERDICT_MASK
) {
/* Presumably the NF_ACCEPT/NF_STOP case: deliver via the okfn. */
255 entry
->state
.okfn(entry
->state
.net
, entry
->state
.sk
, skb
);
/* Presumably the NF_QUEUE case: queue again from position i. */
259 err
= nf_queue(skb
, &entry
->state
, hooks
, i
, verdict
);
271 EXPORT_SYMBOL(nf_reinject
);