/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024
#if 0
#define QDEBUG(x, args ...)     printk(KERN_DEBUG "%s(%d):%s(): " x,          \
                                       __FILE__, __LINE__, __FUNCTION__,      \
                                       ## args)
#else
#define QDEBUG(x, ...)
#endif
struct nfqnl_queue_entry {
        struct list_head list;
        struct nf_info *info;
        struct sk_buff *skb;
        unsigned int id;
};
struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        atomic_t use;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_total;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        atomic_t id_sequence;                   /* 'sequence' of pkt ids */

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;

        spinlock_t lock;

        struct list_head queue_list;            /* packets in queue */
};
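
/* Locking summary (comment added for clarity): instances_lock protects the
 * instance hash table and instance lifetime, while each instance's ->lock
 * protects its queue_list and the queue_* counters.  ->use is a reference
 * count; instance_put() frees the instance when it drops to zero. */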
typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS];
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
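
/* Worked example (comment added for clarity): the hash folds the high byte
 * into the low byte before taking the bucket modulus, e.g. for
 * queue_num = 0x0102: (0x0102 >> 8) | 0x0102 = 0x0103, and
 * 0x0103 % 16 = 3, so that instance lands in bucket 3. */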
static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}
static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
        struct nfqnl_instance *inst;

        read_lock_bh(&instances_lock);
        inst = __instance_lookup(queue_num);
        if (inst)
                atomic_inc(&inst->use);
        read_unlock_bh(&instances_lock);

        return inst;
}
static void
instance_put(struct nfqnl_instance *inst)
{
        if (inst && atomic_dec_and_test(&inst->use)) {
                QDEBUG("kfree(inst=%p)\n", inst);
                kfree(inst);
        }
}
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;

        QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

        write_lock_bh(&instances_lock);
        if (__instance_lookup(queue_num)) {
                inst = NULL;
                QDEBUG("aborting, instance already exists\n");
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst)
                goto out_unlock;

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        atomic_set(&inst->id_sequence, 0);
        /* needs to be two, since we _put() after creation */
        atomic_set(&inst->use, 2);
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE))
                goto out_free;

        hlist_add_head(&inst->hlist,
                       &instance_table[instance_hashfn(queue_num)]);

        write_unlock_bh(&instances_lock);

        QDEBUG("successfully created new instance\n");

        return inst;

out_free:
        kfree(inst);
out_unlock:
        write_unlock_bh(&instances_lock);
        return NULL;
}
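
/* Reference-count lifecycle (illustrative sketch, not compiled): the
 * instance starts with ->use == 2 because the creator drops one reference
 * after instance_create() returns, leaving one reference held by the hash
 * table until the instance is destroyed. */
#if 0
static void example_bind(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *q;

        q = instance_create(queue_num, pid);    /* ->use == 2 on success */
        if (q)
                instance_put(q);                /* ->use == 1: table ref only */
}
#endif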
static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);
static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
        /* first pull it out of the global list */
        if (lock)
                write_lock_bh(&instances_lock);

        QDEBUG("removing instance %p (queuenum=%u) from hash\n",
               inst, inst->queue_num);
        hlist_del(&inst->hlist);

        if (lock)
                write_unlock_bh(&instances_lock);

        /* then flush all pending skbs from the queue */
        nfqnl_flush(inst, NF_DROP);

        /* and finally put the refcount */
        instance_put(inst);

        module_put(THIS_MODULE);
}
static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
        _instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
        _instance_destroy2(inst, 1);
}
static void
issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
{
        QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);

        /* The TCP input path (and probably other bits) assumes it is called
         * from softirq context, not from syscall context the way
         * issue_verdict() is; the TCP input path can deadlock with locks
         * taken from timer softirq, for example.  We therefore emulate
         * softirq context with local_bh_disable(). */

        local_bh_disable();
        nf_reinject(entry->skb, entry->info, verdict);
        local_bh_enable();

        kfree(entry);
}
static inline void
__enqueue_entry(struct nfqnl_instance *queue,
                struct nfqnl_queue_entry *entry)
{
        list_add(&entry->list, &queue->queue_list);
        queue->queue_total++;
}
/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct nfqnl_queue_entry *
__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
             unsigned long data)
{
        struct list_head *p;

        list_for_each_prev(p, &queue->queue_list) {
                struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p;

                if (!cmpfn || cmpfn(entry, data))
                        return entry;
        }
        return NULL;
}
static inline void
__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
{
        list_del(&entry->list);
        q->queue_total--;
}
static inline struct nfqnl_queue_entry *
__find_dequeue_entry(struct nfqnl_instance *queue,
                     nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nfqnl_queue_entry *entry;

        entry = __find_entry(queue, cmpfn, data);
        if (entry == NULL)
                return NULL;

        __dequeue_entry(queue, entry);
        return entry;
}
static inline void
__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
        struct nfqnl_queue_entry *entry;

        while ((entry = __find_dequeue_entry(queue, NULL, 0)))
                issue_verdict(entry, verdict);
}
static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
                 unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nfattr which has 16bit nfa_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        return status;
}
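
/* Copy-mode semantics (comment added for clarity): COPY_NONE queues nothing
 * to userspace, COPY_META sends metadata only, and COPY_PACKET also sends up
 * to copy_range bytes of payload, clamped to 0xffff because the nfattr
 * length field is 16 bit; a range of 0 means "whole packet".  Illustrative
 * sketch, not compiled: */
#if 0
/* select metadata plus at most 256 bytes of payload; callers must hold
 * queue->lock (see nfqnl_set_mode() below for the locking wrapper) */
static int example_set_copy_mode(struct nfqnl_instance *queue)
{
        return __nfqnl_set_mode(queue, NFQNL_COPY_PACKET, 256);
}
#endif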
static struct nfqnl_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue,
                   nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nfqnl_queue_entry *entry;

        spin_lock_bh(&queue->lock);
        entry = __find_dequeue_entry(queue, cmpfn, data);
        spin_unlock_bh(&queue->lock);

        return entry;
}
static void
nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
        spin_lock_bh(&queue->lock);
        __nfqnl_flush(queue, verdict);
        spin_unlock_bh(&queue->lock);
}
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nfqnl_queue_entry *entry, int *errp)
{
        unsigned char *old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nf_info *entinf = entry->info;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        unsigned int tmp_uint;

        QDEBUG("entered\n");
        /* all macros expand to constant values at compile time */
        size = NLMSG_SPACE(sizeof(struct nfgenmsg))
                + NFA_SPACE(sizeof(struct nfqnl_msg_packet_hdr))
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
                + NFA_SPACE(sizeof(u_int32_t))  /* ifindex */
#endif
                + NFA_SPACE(sizeof(u_int32_t))  /* mark */
                + NFA_SPACE(sizeof(struct nfqnl_msg_packet_hw))
                + NFA_SPACE(sizeof(struct nfqnl_msg_packet_timestamp));
        outdev = entinf->outdev;

        spin_lock_bh(&queue->lock);

        switch (queue->copy_mode) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                data_len = 0;
                break;

        case NFQNL_COPY_PACKET:
                if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
                     entskb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entskb))) {
                        spin_unlock_bh(&queue->lock);
                        return NULL;
                }
                if (queue->copy_range == 0
                    || queue->copy_range > entskb->len)
                        data_len = entskb->len;
                else
                        data_len = queue->copy_range;

                size += NFA_SPACE(data_len);
                break;

        default:
                *errp = -EINVAL;
                spin_unlock_bh(&queue->lock);
                return NULL;
        }

        spin_unlock_bh(&queue->lock);
        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entinf->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        pmsg.packet_id   = htonl(entry->id);
        pmsg.hw_protocol = entskb->protocol;
        pmsg.hook        = entinf->hook;

        NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
        indev = entinf->indev;
        if (indev) {
                tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
                NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
                if (entinf->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        /* this is the bridge group "brX" */
                        tmp_uint = htonl(indev->br_port->br->dev->ifindex);
                        NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
                                &tmp_uint);
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        if (entskb->nf_bridge
                            && entskb->nf_bridge->physindev) {
                                tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
                                NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
                                        sizeof(tmp_uint), &tmp_uint);
                        }
                }
#endif
        }
        if (outdev) {
                tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
                NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
                if (entinf->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        /* this is the bridge group "brX" */
                        tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
                        NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        if (entskb->nf_bridge
                            && entskb->nf_bridge->physoutdev) {
                                tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
                                NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                        sizeof(tmp_uint), &tmp_uint);
                        }
                }
#endif
        }
        if (entskb->nfmark) {
                tmp_uint = htonl(entskb->nfmark);
                NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
        }

        if (indev && entskb->dev
            && entskb->dev->hard_header_parse) {
                struct nfqnl_msg_packet_hw phw;

                phw.hw_addrlen =
                        entskb->dev->hard_header_parse(entskb,
                                                       phw.hw_addr);
                phw.hw_addrlen = htons(phw.hw_addrlen);
                NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
        }
        if (entskb->tstamp.off_sec) {
                struct nfqnl_msg_packet_timestamp ts;

                ts.sec = cpu_to_be64(entskb->tstamp.off_sec);
                ts.usec = cpu_to_be64(entskb->tstamp.off_usec);

                NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }
        if (data_len) {
                struct nfattr *nfa;
                int size = NFA_LENGTH(data_len);

                if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
                nfa->nfa_type = NFQA_PAYLOAD;
                nfa->nfa_len = size;

                if (skb_copy_bits(entskb, 0, NFA_DATA(nfa), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nfattr_failure:
        if (skb)
                kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}
static int
nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
                     unsigned int queuenum, void *data)
{
        int status = -EINVAL;
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        struct nfqnl_queue_entry *entry;

        QDEBUG("entered\n");

        queue = instance_lookup_get(queuenum);
        if (!queue) {
                QDEBUG("no queue instance matching\n");
                return -EINVAL;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                QDEBUG("mode COPY_NONE, aborting\n");
                status = -EAGAIN;
                goto err_out_put;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                if (net_ratelimit())
                        printk(KERN_ERR
                               "nf_queue: OOM in nfqnl_enqueue_packet()\n");
                status = -ENOMEM;
                goto err_out_put;
        }

        entry->info = info;
        entry->skb = skb;
        entry->id = atomic_inc_return(&queue->id_sequence);

        nskb = nfqnl_build_packet_message(queue, entry, &status);
        if (nskb == NULL)
                goto err_out_free;

        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid)
                goto err_out_free_nskb;

        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n",
                               queue->queue_total, queue->queue_dropped);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        instance_put(queue);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        spin_unlock_bh(&queue->lock);

err_out_free:
        kfree(entry);
err_out_put:
        instance_put(queue);
        return status;
}
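
/* Error semantics (summary added for clarity; see net/netfilter/nf_queue.c):
 * a negative return from this nf_queue_handler outfn tells nf_queue() that
 * the packet was not queued, and nf_queue() then frees the skb and nf_info
 * itself, so the error paths above release only what this function
 * allocated (the entry and the netlink message skb). */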
static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0)
                skb_trim(e->skb, data_len);
        else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        struct sk_buff *newskb;

                        newskb = skb_copy_expand(e->skb,
                                                 skb_headroom(e->skb),
                                                 diff,
                                                 GFP_ATOMIC);
                        if (newskb == NULL) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        if (e->skb->sk)
                                skb_set_owner_w(newskb, e->skb->sk);
                        kfree_skb(e->skb);
                        e->skb = newskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(&e->skb, data_len))
                return -ENOMEM;
        memcpy(e->skb->data, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}
static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
        return (id == e->id);
}
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status;

        spin_lock_bh(&queue->lock);
        status = __nfqnl_set_mode(queue, mode, range);
        spin_unlock_bh(&queue->lock);

        return status;
}
static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
        struct nf_info *entinf = entry->info;

        if (entinf->indev)
                if (entinf->indev->ifindex == ifindex)
                        return 1;

        if (entinf->outdev)
                if (entinf->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}
/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        QDEBUG("entering for ifindex %u\n", ifindex);

        /* this only looks like we have to hold the readlock for a way too long
         * time, issue_verdict(), nf_reinject(), ... - but we always only
         * issue NF_DROP, which is processed directly in nf_reinject() */
        read_lock_bh(&instances_lock);

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry(inst, tmp, head, hlist) {
                        struct nfqnl_queue_entry *entry;
                        while ((entry = find_dequeue_entry(inst, dev_cmp,
                                                           ifindex)) != NULL)
                                issue_verdict(entry, NF_DROP);
                }
        }

        read_unlock_bh(&instances_lock);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_NETFILTER && n->pid) {
                int i;

                /* destroy all instances for this pid */
                write_lock_bh(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if (n->pid == inst->peer_pid)
                                        __instance_destroy(inst);
                        }
                }
                write_unlock_bh(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};
static const int nfqa_verdict_min[NFQA_MAX] = {
        [NFQA_VERDICT_HDR-1]    = sizeof(struct nfqnl_msg_verdict_hdr),
        [NFQA_MARK-1]           = sizeof(u_int32_t),
        [NFQA_PAYLOAD-1]        = 0,
};
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nfqnl_queue_entry *entry;
        int err;

        if (nfattr_bad_size(nfqa, NFQA_MAX, nfqa_verdict_min)) {
                QDEBUG("bad attribute size\n");
                return -EINVAL;
        }

        queue = instance_lookup_get(queue_num);
        if (!queue)
                return -ENODEV;

        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                err = -EPERM;
                goto err_out_put;
        }

        if (!nfqa[NFQA_VERDICT_HDR-1]) {
                err = -EINVAL;
                goto err_out_put;
        }

        vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
                err = -EINVAL;
                goto err_out_put;
        }

        entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
        if (entry == NULL) {
                err = -ENOENT;
                goto err_out_put;
        }

        if (nfqa[NFQA_PAYLOAD-1]) {
                if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]),
                                 NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK-1])
                entry->skb->nfmark = ntohl(*(u_int32_t *)
                                           NFA_DATA(nfqa[NFQA_MARK-1]));

        issue_verdict(entry, verdict);
        instance_put(queue);
        return 0;

err_out_put:
        instance_put(queue);
        return err;
}
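
/* Userspace side (illustrative sketch, not compiled; nfattr framing and
 * error handling elided): the reply to an NFQNL_MSG_PACKET is an
 * NFQNL_MSG_VERDICT message whose NFQA_VERDICT_HDR attribute carries the
 * verdict and the packet id echoed back in network byte order. */
#if 0
static void example_fill_verdict(struct nfqnl_msg_verdict_hdr *vh,
                                 u_int32_t packet_id)
{
        vh->verdict = htonl(NF_ACCEPT);
        vh->id = htonl(packet_id);      /* id from the NFQA_PACKET_HDR */
}
#endif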
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
        return -ENOTSUPP;
}
static const int nfqa_cfg_min[NFQA_CFG_MAX] = {
        [NFQA_CFG_CMD-1]        = sizeof(struct nfqnl_msg_config_cmd),
        [NFQA_CFG_PARAMS-1]     = sizeof(struct nfqnl_msg_config_params),
};
static struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        int ret = 0;

        QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

        if (nfattr_bad_size(nfqa, NFQA_CFG_MAX, nfqa_cfg_min)) {
                QDEBUG("bad attribute size\n");
                return -EINVAL;
        }

        queue = instance_lookup_get(queue_num);
        if (nfqa[NFQA_CFG_CMD-1]) {
                struct nfqnl_msg_config_cmd *cmd;
                cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]);
                QDEBUG("found CFG_CMD\n");

                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue)
                                return -EBUSY;

                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (!queue)
                                return -EINVAL;
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue)
                                return -ENODEV;

                        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                                ret = -EPERM;
                                goto out_put;
                        }

                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                        QDEBUG("registering queue handler for pf=%u\n",
                               ntohs(cmd->pf));
                        ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
                        break;
                case NFQNL_CFG_CMD_PF_UNBIND:
                        QDEBUG("unregistering queue handler for pf=%u\n",
                               ntohs(cmd->pf));
                        /* This is a bug and a feature.  We can unregister
                         * other handlers(!) */
                        ret = nf_unregister_queue_handler(ntohs(cmd->pf));
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
        } else {
                if (!queue) {
                        QDEBUG("no config command, and no instance ENOENT\n");
                        ret = -ENOENT;
                        goto out_put;
                }

                if (queue->peer_pid != NETLINK_CB(skb).pid) {
                        QDEBUG("no config command, and wrong pid\n");
                        ret = -EPERM;
                        goto out_put;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS-1]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENOENT;
                        goto out_put;
                }
                params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
                nfqnl_set_mode(queue, params->copy_mode,
                               ntohl(params->copy_range));
        }

out_put:
        instance_put(queue);
        return ret;
}
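
/* Typical configuration sequence (summary added for clarity): userspace
 * first sends NFQNL_CFG_CMD_PF_BIND for the protocol family, then
 * NFQNL_CFG_CMD_BIND with the queue number in res_id to create an instance
 * owned by its netlink pid, and finally an NFQA_CFG_PARAMS attribute to
 * select the copy mode and range, e.g. NFQNL_COPY_PACKET with 0xffff. */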
static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX, },
};
static struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};
#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}
static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}
static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock_bh(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
        read_unlock_bh(&instances_lock);
}
static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          atomic_read(&inst->id_sequence),
                          atomic_read(&inst->use));
}
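
/* Column legend for /proc/net/netfilter/nfnetlink_queue (comment added for
 * clarity, matching the seq_printf() above): queue number, peer pid, queue
 * length, copy mode, copy range, queue_dropped, queue_user_dropped, id
 * sequence counter, and the instance use count. */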
static struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};
static int nfqnl_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct iter_state *is;
        int ret;

        is = kzalloc(sizeof(*is), GFP_KERNEL);
        if (!is)
                return -ENOMEM;
        ret = seq_open(file, &nfqnl_seq_ops);
        if (ret < 0)
                goto out_free;
        seq = file->private_data;
        seq->private = is;
        return ret;
out_free:
        kfree(is);
        return ret;
}
static struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */
static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc_nfqueue;
#endif

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
                                         proc_net_netfilter);
        if (!proc_nfqueue)
                goto cleanup_subsys;
        proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}
static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}
MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);