/* net/netfilter/nf_queue.c */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>

#include "nf_internals.h"
/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static struct nf_queue_handler *queue_handler[NPROTO];
static struct nf_queue_rerouter *queue_rerouter[NPROTO];

static DEFINE_RWLOCK(queue_handler_lock);

/* Return -EBUSY when somebody else is registered, -EEXIST if the
 * same handler is already registered, and 0 on success. */
int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		queue_handler[pf] = qh;
		ret = 0;
	}
	write_unlock_bh(&queue_handler_lock);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
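
/*
 * Usage sketch (hypothetical handler, not part of this file).  The field
 * names follow the uses visible below (->outfn, ->data, ->name); the
 * handler itself is an assumption for illustration only:
 *
 *	static int my_outfn(struct sk_buff *skb, struct nf_info *info,
 *			    unsigned int queuenum, void *data)
 *	{
 *		// stash skb somewhere; every queued packet must later
 *		// come back through nf_reinject(skb, info, verdict)
 *		return 0;
 *	}
 *
 *	static struct nf_queue_handler my_handler = {
 *		.name	= "my_queue",
 *		.outfn	= my_outfn,
 *	};
 *
 *	err = nf_register_queue_handler(PF_INET, &my_handler);
 *	// -EEXIST: this handler already registered; -EBUSY: another one is
 */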
/* The caller must flush their queue before calling this. */
int nf_unregister_queue_handler(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	queue_handler[pf] = NULL;
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	rcu_assign_pointer(queue_rerouter[pf], rer);
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
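
/*
 * Rerouter sketch (hypothetical, for illustration).  The shapes follow the
 * uses in nf_queue()/nf_reinject() below: rer_size bytes of routing state
 * are tacked onto the nf_info allocation, ->save() records state before
 * queueing, and ->reroute() reapplies it on an accepted verdict (a
 * negative return turns the verdict into NF_DROP):
 *
 *	static struct nf_queue_rerouter my_rerouter = {
 *		.rer_size	= sizeof(struct my_route_state), // hypothetical
 *		.save		= my_save,	// void (struct sk_buff *, struct nf_info *)
 *		.reroute	= my_reroute,	// int (struct sk_buff **, struct nf_info *)
 *	};
 *
 *	nf_register_queue_rerouter(PF_INET, &my_rerouter);
 */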
int nf_unregister_queue_rerouter(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	rcu_assign_pointer(queue_rerouter[pf], NULL);
	write_unlock_bh(&queue_handler_lock);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
{
	int pf;

	write_lock_bh(&queue_handler_lock);
	for (pf = 0; pf < NPROTO; pf++) {
		if (queue_handler[pf] == qh)
			queue_handler[pf] = NULL;
	}
	write_unlock_bh(&queue_handler_lock);
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff **skb,
	     struct list_head *elem,
	     int pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	int status;
	struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev = NULL;
	struct net_device *physoutdev = NULL;
#endif
	struct nf_queue_rerouter *rerouter;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	read_lock(&queue_handler_lock);
	if (!queue_handler[pf]) {
		read_unlock(&queue_handler_lock);
		kfree_skb(*skb);
		return 1;
	}

	/* Extra room after the nf_info holds the rerouter's saved state. */
	info = kmalloc(sizeof(*info) + queue_rerouter[pf]->rer_size,
		       GFP_ATOMIC);
	if (!info) {
		if (net_ratelimit())
			printk(KERN_ERR "OOM queueing packet %p\n",
			       *skb);
		read_unlock(&queue_handler_lock);
		kfree_skb(*skb);
		return 1;
	}

	*info = (struct nf_info) {
		(struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

	/* If it's going away, ignore hook. */
	if (!try_module_get(info->elem->owner)) {
		read_unlock(&queue_handler_lock);
		kfree(info);
		return 0;
	}

	/* Bump dev refs so they don't vanish while the packet is out. */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
	if ((*skb)->nf_bridge) {
		physindev = (*skb)->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = (*skb)->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	rerouter = rcu_dereference(queue_rerouter[pf]);
	if (rerouter)
		rerouter->save(*skb, info);

	status = queue_handler[pf]->outfn(*skb, info, queuenum,
					  queue_handler[pf]->data);

	read_unlock(&queue_handler_lock);

	if (status < 0) {
		/* The handler rejected the packet: undo all of the above. */
		if (indev)
			dev_put(indev);
		if (outdev)
			dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
		if (physindev)
			dev_put(physindev);
		if (physoutdev)
			dev_put(physoutdev);
#endif
		module_put(info->elem->owner);
		kfree(info);
		kfree_skb(*skb);
	}

	return 1;
}
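
/*
 * Caller-side contract: nf_queue() returns 1 when it has taken ownership
 * of the skb (queued it, or freed it on error) and 0 when the hook should
 * simply be skipped because its module is unloading.  The NF_QUEUE case in
 * nf_reinject() below shows the expected calling pattern:
 *
 *	if (!nf_queue(&skb, elem, pf, hook, indev, outdev, okfn, queuenum))
 *		goto next_hook;		// nobody took it, keep traversing
 */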
void nf_reinject(struct sk_buff *skb, struct nf_info *info,
		 unsigned int verdict)
{
	struct list_head *elem = &info->elem->list;
	struct list_head *i;
	struct nf_queue_rerouter *rerouter;

	rcu_read_lock();

	/* Release the device references taken in nf_queue(). */
	if (info->indev)
		dev_put(info->indev);
	if (info->outdev)
		dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		if (skb->nf_bridge->physindev)
			dev_put(skb->nf_bridge->physindev);
		if (skb->nf_bridge->physoutdev)
			dev_put(skb->nf_bridge->physoutdev);
	}
#endif

	/* Drop reference to owner of hook which queued us. */
	module_put(info->elem->owner);

	/* Make sure the hook that queued us is still registered. */
	list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
		if (i == elem)
			break;
	}

	if (i == &nf_hooks[info->pf][info->hook]) {
		/* The module which sent it to userspace is gone. */
		NFDEBUG("%s: module disappeared, dropping packet.\n",
			__FUNCTION__);
		verdict = NF_DROP;
	}

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		rerouter = rcu_dereference(queue_rerouter[info->pf]);
		if (rerouter && rerouter->reroute(&skb, info) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
 next_hook:
		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
				     &skb, info->hook,
				     info->indev, info->outdev, &elem,
				     info->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
		info->okfn(skb);
		break;

	case NF_QUEUE:
		if (!nf_queue(&skb, elem, info->pf, info->hook,
			      info->indev, info->outdev, info->okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	}
	rcu_read_unlock();

	if (verdict == NF_DROP)
		kfree_skb(skb);

	kfree(info);
}
EXPORT_SYMBOL(nf_reinject);
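
/*
 * Verdict semantics on reinjection, as handled above (illustrative calls;
 * the queue handler passes back the nf_info it received from nf_queue()):
 *
 *	nf_reinject(skb, info, NF_ACCEPT);	// resume at the next hook
 *	nf_reinject(skb, info, NF_REPEAT);	// rerun the hook that queued it
 *	nf_reinject(skb, info, NF_DROP);	// free the packet
 */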
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	struct nf_queue_handler *qh;

	read_lock_bh(&queue_handler_lock);
	qh = queue_handler[*pos];
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	read_unlock_bh(&queue_handler_lock);

	return ret;
}

static struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
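
/*
 * Sample /proc/net/netfilter/nf_queue output, one line per protocol
 * family as produced by seq_show() above ("my_queue" is a hypothetical
 * registered handler name):
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 my_queue
 *	 3 NONE
 *	...
 */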
int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *pde;

	pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
	if (!pde)
		return -1;
	pde->proc_fops = &nfqueue_file_ops;
#endif
	return 0;
}