/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)     printk(KERN_DEBUG "%s(%d):%s(): " x, \
                                        __FILE__, __LINE__, __FUNCTION__, \
                                        ## args)
#else
#define QDEBUG(x, ...)
#endif
struct nfqnl_queue_entry {
        struct list_head list;
        struct nf_info *info;
        struct sk_buff *skb;
        unsigned int id;
};

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        atomic_t use;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_total;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        atomic_t id_sequence;                   /* 'sequence' of pkt ids */

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;

        spinlock_t lock;

        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS];
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
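
/*
 * Worked example: queue_num 0x0102 hashes to ((0x0102 >> 8) | 0x0102)
 * % 16 = 0x0103 % 16 = bucket 3.  Folding the high byte into the low
 * byte keeps queue numbers that differ only in their upper byte from
 * piling into the same bucket.
 */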
static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
        struct nfqnl_instance *inst;

        read_lock_bh(&instances_lock);
        inst = __instance_lookup(queue_num);
        if (inst)
                atomic_inc(&inst->use);
        read_unlock_bh(&instances_lock);

        return inst;
}

static void
instance_put(struct nfqnl_instance *inst)
{
        if (inst && atomic_dec_and_test(&inst->use)) {
                QDEBUG("kfree(inst=%p)\n", inst);
                kfree(inst);
        }
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;

        QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

        write_lock_bh(&instances_lock);
        if (__instance_lookup(queue_num)) {
                inst = NULL;
                QDEBUG("aborting, instance already exists\n");
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst)
                goto out_unlock;

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        atomic_set(&inst->id_sequence, 0);
        /* needs to be two, since we _put() after creation */
        atomic_set(&inst->use, 2);
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE))
                goto out_free;

        hlist_add_head(&inst->hlist,
                       &instance_table[instance_hashfn(queue_num)]);

        write_unlock_bh(&instances_lock);

        QDEBUG("successfully created new instance\n");

        return inst;

out_free:
        kfree(inst);
out_unlock:
        write_unlock_bh(&instances_lock);
        return NULL;
}
static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
        /* first pull it out of the global list */
        if (lock)
                write_lock_bh(&instances_lock);

        QDEBUG("removing instance %p (queuenum=%u) from hash\n",
                inst, inst->queue_num);
        hlist_del(&inst->hlist);

        if (lock)
                write_unlock_bh(&instances_lock);

        /* then flush all pending skbs from the queue */
        nfqnl_flush(inst, NF_DROP);

        /* and finally put the refcount */
        instance_put(inst);

        module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
        _instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
        _instance_destroy2(inst, 1);
}
static void
issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
{
        QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);

        /* The TCP input path (and probably other bits) assumes it is
         * called from softirq context, not from a syscall the way
         * issue_verdict() is.  The TCP input path deadlocks with locks
         * taken from timer softirq, for example.  We therefore emulate
         * softirq context with local_bh_disable() */

        local_bh_disable();
        nf_reinject(entry->skb, entry->info, verdict);
        local_bh_enable();

        kfree(entry);
}
static inline void
__enqueue_entry(struct nfqnl_instance *queue,
                struct nfqnl_queue_entry *entry)
{
        list_add(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct nfqnl_queue_entry *
__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
             unsigned long data)
{
        struct list_head *p;

        list_for_each_prev(p, &queue->queue_list) {
                struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p;

                if (!cmpfn || cmpfn(entry, data))
                        return entry;
        }
        return NULL;
}

static inline void
__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
{
        list_del(&entry->list);
        q->queue_total--;
}

static inline struct nfqnl_queue_entry *
__find_dequeue_entry(struct nfqnl_instance *queue,
                     nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nfqnl_queue_entry *entry;

        entry = __find_entry(queue, cmpfn, data);
        if (entry == NULL)
                return NULL;

        __dequeue_entry(queue, entry);
        return entry;
}

static inline void
__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
        struct nfqnl_queue_entry *entry;

        while ((entry = __find_dequeue_entry(queue, NULL, 0)))
                issue_verdict(entry, verdict);
}
static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
                 unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nfattr which has 16bit nfa_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        return status;
}
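
/*
 * Example: a NFQNL_COPY_PACKET request with range 0x12345 is clamped to
 * a copy_range of 0xffff, since the payload travels in a struct nfattr
 * whose nfa_len is only 16 bits wide; NFQNL_COPY_NONE and
 * NFQNL_COPY_META always reset copy_range to 0.
 */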
static struct nfqnl_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue,
                   nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nfqnl_queue_entry *entry;

        spin_lock_bh(&queue->lock);
        entry = __find_dequeue_entry(queue, cmpfn, data);
        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
        spin_lock_bh(&queue->lock);
        __nfqnl_flush(queue, verdict);
        spin_unlock_bh(&queue->lock);
}
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nfqnl_queue_entry *entry, int *errp)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nf_info *entinf = entry->info;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        __be32 tmp_uint;

        QDEBUG("entered\n");

        /* all macros expand to constant values at compile time */
        size = NLMSG_SPACE(sizeof(struct nfgenmsg))
                + NFA_SPACE(sizeof(struct nfqnl_msg_packet_hdr))
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
#endif
                + NFA_SPACE(sizeof(u_int32_t))  /* mark */
                + NFA_SPACE(sizeof(struct nfqnl_msg_packet_hw))
                + NFA_SPACE(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entinf->outdev;

        spin_lock_bh(&queue->lock);

        switch (queue->copy_mode) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                data_len = 0;
                break;

        case NFQNL_COPY_PACKET:
                if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
                     entskb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entskb))) {
                        spin_unlock_bh(&queue->lock);
                        return NULL;
                }
                if (queue->copy_range == 0
                    || queue->copy_range > entskb->len)
                        data_len = entskb->len;
                else
                        data_len = queue->copy_range;

                size += NFA_SPACE(data_len);
                break;

        default:
                *errp = -EINVAL;
                spin_unlock_bh(&queue->lock);
                return NULL;
        }

        spin_unlock_bh(&queue->lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entinf->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        pmsg.packet_id = htonl(entry->id);
        pmsg.hw_protocol = entskb->protocol;
        pmsg.hook = entinf->hook;

        NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

        indev = entinf->indev;
        if (indev) {
                tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
                NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
                if (entinf->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        /* this is the bridge group "brX" */
                        tmp_uint = htonl(indev->br_port->br->dev->ifindex);
                        NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
                                &tmp_uint);
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        if (entskb->nf_bridge
                            && entskb->nf_bridge->physindev) {
                                tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
                                NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
                                        sizeof(tmp_uint), &tmp_uint);
                        }
                }
#endif
        }

        if (outdev) {
                tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
                NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
                if (entinf->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        /* this is the bridge group "brX" */
                        tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
                        NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        if (entskb->nf_bridge
                            && entskb->nf_bridge->physoutdev) {
                                tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
                                NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                        sizeof(tmp_uint), &tmp_uint);
                        }
                }
#endif
        }

        if (entskb->mark) {
                tmp_uint = htonl(entskb->mark);
                NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
        }

        if (indev && entskb->dev
            && entskb->dev->hard_header_parse) {
                struct nfqnl_msg_packet_hw phw;

                int len = entskb->dev->hard_header_parse(entskb,
                                                         phw.hw_addr);
                phw.hw_addrlen = htons(len);
                NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                struct nfattr *nfa;
                int size = NFA_LENGTH(data_len);

                if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
                nfa->nfa_type = NFQA_PAYLOAD;
                nfa->nfa_len = size;

                if (skb_copy_bits(entskb, 0, NFA_DATA(nfa), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nfattr_failure:
        if (skb)
                kfree_skb(skb);
        *errp = -EINVAL;
        if (net_ratelimit())
                printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}
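
/*
 * The message built above is a single netlink message laid out as:
 *
 *      struct nlmsghdr   (type NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET)
 *      struct nfgenmsg   (family, NFNETLINK_V0, queue number as res_id)
 *      NFQA_PACKET_HDR   (packet id, hw protocol, hook)
 *      NFQA_IFINDEX_*    (optional input/output and bridge phys ifindexes)
 *      NFQA_MARK         (optional, only when skb->mark is set)
 *      NFQA_HWADDR       (optional link-layer source address)
 *      NFQA_TIMESTAMP    (optional receive timestamp)
 *      NFQA_PAYLOAD      (optional, up to copy_range bytes of packet data)
 */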
static int
nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
                     unsigned int queuenum, void *data)
{
        int status = -EINVAL;
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        struct nfqnl_queue_entry *entry;

        QDEBUG("entered\n");

        queue = instance_lookup_get(queuenum);
        if (!queue) {
                QDEBUG("no queue instance matching\n");
                return -EINVAL;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                QDEBUG("mode COPY_NONE, aborting\n");
                status = -EAGAIN;
                goto err_out_put;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                if (net_ratelimit())
                        printk(KERN_ERR
                                "nf_queue: OOM in nfqnl_enqueue_packet()\n");
                status = -ENOMEM;
                goto err_out_put;
        }

        entry->info = info;
        entry->skb = skb;
        entry->id = atomic_inc_return(&queue->id_sequence);

        nskb = nfqnl_build_packet_message(queue, entry, &status);
        if (nskb == NULL)
                goto err_out_free;

        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid)
                goto err_out_free_nskb;

        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n",
                               queue->queue_total, queue->queue_dropped);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        instance_put(queue);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        spin_unlock_bh(&queue->lock);

err_out_free:
        kfree(entry);
err_out_put:
        instance_put(queue);
        return status;
}
static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        struct sk_buff *newskb;

                        newskb = skb_copy_expand(e->skb,
                                                 skb_headroom(e->skb),
                                                 diff,
                                                 GFP_ATOMIC);
                        if (newskb == NULL) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        if (e->skb->sk)
                                skb_set_owner_w(newskb, e->skb->sk);
                        kfree_skb(e->skb);
                        e->skb = newskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(&e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}
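
/*
 * Example: if the queued skb holds 100 bytes and userspace returns a
 * 60-byte replacement payload, diff is -40 and the skb is trimmed; if
 * it returns 140 bytes, diff is +40 and the skb grows via skb_put(),
 * preceded by skb_copy_expand() when the tailroom is too small.  In
 * both cases ip_summed becomes CHECKSUM_NONE, since the payload, and
 * hence any precomputed checksum, has changed.
 */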
static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
        return (id == e->id);
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status;

        spin_lock_bh(&queue->lock);
        status = __nfqnl_set_mode(queue, mode, range);
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
        struct nf_info *entinf = entry->info;

        if (entinf->indev)
                if (entinf->indev->ifindex == ifindex)
                        return 1;
        if (entinf->outdev)
                if (entinf->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}
/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        QDEBUG("entering for ifindex %u\n", ifindex);

        /* It may look like we hold the read lock for far too long here,
         * across issue_verdict(), nf_reinject(), ... - but we only ever
         * issue NF_DROP, which is processed directly in nf_reinject() */
        read_lock_bh(&instances_lock);

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry(inst, tmp, head, hlist) {
                        struct nfqnl_queue_entry *entry;
                        while ((entry = find_dequeue_entry(inst, dev_cmp,
                                                           ifindex)) != NULL)
                                issue_verdict(entry, NF_DROP);
                }
        }

        read_unlock_bh(&instances_lock);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_NETFILTER && n->pid) {
                int i;

                /* destroy all instances for this pid */
                write_lock_bh(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if (n->pid == inst->peer_pid)
                                        __instance_destroy(inst);
                        }
                }
                write_unlock_bh(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};
static const int nfqa_verdict_min[NFQA_MAX] = {
        [NFQA_VERDICT_HDR-1]    = sizeof(struct nfqnl_msg_verdict_hdr),
        [NFQA_MARK-1]           = sizeof(u_int32_t),
        [NFQA_PAYLOAD-1]        = 0,
};
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   struct nlmsghdr *nlh, struct nfattr *nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nfqnl_queue_entry *entry;
        int err;

        if (nfattr_bad_size(nfqa, NFQA_MAX, nfqa_verdict_min)) {
                QDEBUG("bad attribute size\n");
                return -EINVAL;
        }

        queue = instance_lookup_get(queue_num);
        if (!queue)
                return -ENODEV;

        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                err = -EPERM;
                goto err_out_put;
        }

        if (!nfqa[NFQA_VERDICT_HDR-1]) {
                err = -EINVAL;
                goto err_out_put;
        }

        vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
                err = -EINVAL;
                goto err_out_put;
        }

        entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
        if (entry == NULL) {
                err = -ENOENT;
                goto err_out_put;
        }

        if (nfqa[NFQA_PAYLOAD-1]) {
                if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]),
                                 NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK-1])
                entry->skb->mark = ntohl(*(__be32 *)
                                         NFA_DATA(nfqa[NFQA_MARK-1]));

        issue_verdict(entry, verdict);
        instance_put(queue);
        return 0;

err_out_put:
        instance_put(queue);
        return err;
}
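
/*
 * A verdict message therefore carries a mandatory NFQA_VERDICT_HDR
 * (verdict plus packet id), optionally an NFQA_PAYLOAD that replaces
 * the packet contents via nfqnl_mangle() (a failed mangle downgrades
 * the verdict to NF_DROP), and optionally an NFQA_MARK that rewrites
 * skb->mark before the packet is reinjected.
 */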
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nfattr *nfqa[])
{
        return -ENOTSUPP;
}

static const int nfqa_cfg_min[NFQA_CFG_MAX] = {
        [NFQA_CFG_CMD-1]        = sizeof(struct nfqnl_msg_config_cmd),
        [NFQA_CFG_PARAMS-1]     = sizeof(struct nfqnl_msg_config_params),
};

static struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nfattr *nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        int ret = 0;

        QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

        if (nfattr_bad_size(nfqa, NFQA_CFG_MAX, nfqa_cfg_min)) {
                QDEBUG("bad attribute size\n");
                return -EINVAL;
        }

        queue = instance_lookup_get(queue_num);
        if (nfqa[NFQA_CFG_CMD-1]) {
                struct nfqnl_msg_config_cmd *cmd;
                cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]);
                QDEBUG("found CFG_CMD\n");

                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue)
                                return -EBUSY;

                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (!queue)
                                return -EINVAL;
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue)
                                return -ENODEV;

                        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                                ret = -EPERM;
                                goto out_put;
                        }

                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                        QDEBUG("registering queue handler for pf=%u\n",
                                ntohs(cmd->pf));
                        ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
                        break;
                case NFQNL_CFG_CMD_PF_UNBIND:
                        QDEBUG("unregistering queue handler for pf=%u\n",
                                ntohs(cmd->pf));
                        /* This is a bug and a feature.  We can unregister
                         * other handlers(!) */
                        ret = nf_unregister_queue_handler(ntohs(cmd->pf));
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
        } else {
                if (!queue) {
                        QDEBUG("no config command, and no instance ENOENT\n");
                        ret = -ENOENT;
                        goto out_put;
                }

                if (queue->peer_pid != NETLINK_CB(skb).pid) {
                        QDEBUG("no config command, and wrong pid\n");
                        ret = -EPERM;
                        goto out_put;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS-1]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENOENT;
                        goto out_put;
                }
                params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN-1]) {
                __be32 *queue_maxlen;
                queue_maxlen = NFA_DATA(nfqa[NFQA_CFG_QUEUE_MAXLEN-1]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

out_put:
        instance_put(queue);
        return ret;
}
static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX, },
};

static struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};
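
/*
 * For reference, a minimal userspace consumer of this interface.  This
 * is a sketch assuming the separate libnetfilter_queue library (not
 * part of the kernel tree): nfq_bind_pf() issues NFQNL_CFG_CMD_PF_BIND,
 * nfq_create_queue() issues NFQNL_CFG_CMD_BIND, and nfq_set_mode()
 * sends NFQA_CFG_PARAMS, all handled by nfqnl_recv_config() above.
 * The callback simply accepts every packet:
 *
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <linux/netfilter.h>
 *      #include <libnetfilter_queue/libnetfilter_queue.h>
 *
 *      static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *                    struct nfq_data *nfa, void *data)
 *      {
 *              struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
 *              return nfq_set_verdict(qh, ntohl(ph->packet_id),
 *                                     NF_ACCEPT, 0, NULL);
 *      }
 *
 *      int main(void)
 *      {
 *              char buf[4096];
 *              struct nfq_handle *h = nfq_open();
 *              struct nfq_q_handle *qh;
 *              int rv, fd;
 *
 *              nfq_unbind_pf(h, AF_INET);
 *              nfq_bind_pf(h, AF_INET);
 *              qh = nfq_create_queue(h, 0, &cb, NULL);
 *              nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *              fd = nfq_fd(h);
 *              while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
 *                      nfq_handle_packet(h, buf, rv);
 *              return 0;
 *      }
 */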
#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock_bh(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
        read_unlock_bh(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          atomic_read(&inst->id_sequence),
                          atomic_read(&inst->use));
}
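
/*
 * Each /proc/net/netfilter/nfnetlink_queue line therefore shows, per
 * instance: queue number, peer pid, packets currently queued, copy
 * mode, copy range, packets dropped because the queue was full,
 * packets dropped because the unicast to userspace failed, the last
 * packet id handed out, and the instance use count.
 */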
static struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct iter_state *is;
        int ret;

        is = kzalloc(sizeof(*is), GFP_KERNEL);
        if (!is)
                return -ENOMEM;
        ret = seq_open(file, &nfqnl_seq_ops);
        if (ret < 0)
                goto out_free;
        seq = file->private_data;
        seq->private = is;
        return ret;
out_free:
        kfree(is);
        return ret;
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */
static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc_nfqueue;
#endif

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
                                         proc_net_netfilter);
        if (!proc_nfqueue)
                goto cleanup_subsys;
        proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}
static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);