/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024
struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        unsigned int    id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};
typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
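/*
 * Editor's note, worked example (illustrative): for queue_num 0x1234,
 * (0x1234 >> 8) | 0x1234 == 0x1236, and 0x1236 % 16 == 6, so that
 * instance lands in bucket 6.  Folding the high byte into the low byte
 * spreads sequentially numbered queues across the 16 buckets.
 */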
static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}
static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);
static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}
static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_del(&entry->list);
        queue->queue_total--;
}
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry)
                __dequeue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);

        return entry;
}
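/*
 * Editor's note: lookup by packet id is a linear scan under queue->lock,
 * so per-packet verdict latency grows with queue depth; the batch verdict
 * path further down avoids repeating this scan for every packet.
 */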
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}
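/*
 * Editor's note: flushed entries are handed back via nf_reinject() with
 * NF_DROP rather than freed directly, so the core nf_queue code can
 * release the references held by each entry through its normal paths.
 */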
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;

                size += nla_total_size(data_len);
                break;
        }

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol = entskb->protocol;
        pmsg->hook        = entry->hook;
        *packet_id_ptr    = &pmsg->packet_id;

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                             htonl(entskb->nf_bridge->physindev->ifindex));
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                             htonl(entskb->nf_bridge->physoutdev->ifindex));
                }
#endif
        }

        if (entskb->mark)
                NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);

                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue) {
                err = -ESRCH;
                goto err_out;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                err = -EINVAL;
                goto err_out;
        }

        nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s).\n",
                               queue->queue_total);
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
        *packet_id_ptr = htonl(entry->id);

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
err_out:
        return err;
}
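/*
 * Editor's note: the entry is appended to queue_list only after
 * nfnetlink_unicast() succeeds; on any failure the caller gets a negative
 * error back and the packet remains under the core nf_queue code's
 * control, so nothing is left dangling in this module.
 */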
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
        struct sk_buff *nskb;
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}
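/*
 * Editor's note: after a userspace payload rewrite the skb's checksum
 * state is reset to CHECKSUM_NONE so the stack revalidates or recomputes
 * checksums instead of trusting state computed for the old payload.
 */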
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        spin_unlock_bh(&queue->lock);

        return status;
}
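/*
 * Editor's note, example (illustrative): a config request asking for
 * NFQNL_COPY_PACKET with copy_range 0x20000 is clamped to 0xffff here,
 * because the payload travels in a struct nlattr whose nla_len field is
 * only 16 bits wide.
 */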
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}
/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, tmp, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}
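/*
 * Editor's note: this walk is protected only by rcu_read_lock();
 * nfqnl_flush() takes each queue's own spinlock, so instances being torn
 * down concurrently remain safe to traverse.
 */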
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}
static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}
static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};
static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
};
static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
{
        struct nfqnl_instance *queue;

        queue = instance_lookup(queue_num);
        if (!queue)
                return ERR_PTR(-ENODEV);

        if (queue->peer_pid != nlpid)
                return ERR_PTR(-EPERM);

        return queue;
}
static struct nfqnl_msg_verdict_hdr *
verdicthdr_get(const struct nlattr * const nfqa[])
{
        struct nfqnl_msg_verdict_hdr *vhdr;
        unsigned int verdict;

        if (!nfqa[NFQA_VERDICT_HDR])
                return NULL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict);
        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
                return NULL;
        return vhdr;
}
static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}
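/*
 * Editor's note, worked example (illustrative): packet ids come from a
 * free-running 32-bit counter, so "after" must survive wraparound.  With
 * id == 5 and max == 0xfffffffe, id - max wraps to 7 and (int)7 > 0, so
 * id is correctly considered newer even though it is numerically smaller.
 */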
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
                         const struct nlmsghdr *nlh,
                         const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        struct nf_queue_entry *entry, *tmp;
        unsigned int verdict, maxid;
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);
        maxid = ntohl(vhdr->id);

        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
                if (nfq_id_after(entry->id, maxid))
                        break;
                __dequeue_entry(queue, entry);
                list_add_tail(&entry->list, &batch_list);
        }

        spin_unlock_bh(&queue->lock);

        if (list_empty(&batch_list))
                return -ENOENT;

        list_for_each_entry_safe(entry, tmp, &batch_list, list) {
                if (nfqa[NFQA_MARK])
                        entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
                nf_reinject(entry, verdict);
        }
        return 0;
}
*ctnl
, struct sk_buff
*skb
,
708 const struct nlmsghdr
*nlh
,
709 const struct nlattr
* const nfqa
[])
711 struct nfgenmsg
*nfmsg
= NLMSG_DATA(nlh
);
712 u_int16_t queue_num
= ntohs(nfmsg
->res_id
);
714 struct nfqnl_msg_verdict_hdr
*vhdr
;
715 struct nfqnl_instance
*queue
;
716 unsigned int verdict
;
717 struct nf_queue_entry
*entry
;
719 queue
= instance_lookup(queue_num
);
722 queue
= verdict_instance_lookup(queue_num
, NETLINK_CB(skb
).pid
);
724 return PTR_ERR(queue
);
726 vhdr
= verdicthdr_get(nfqa
);
730 verdict
= ntohl(vhdr
->verdict
);
732 entry
= find_dequeue_entry(queue
, ntohl(vhdr
->id
));
736 if (nfqa
[NFQA_PAYLOAD
]) {
737 if (nfqnl_mangle(nla_data(nfqa
[NFQA_PAYLOAD
]),
738 nla_len(nfqa
[NFQA_PAYLOAD
]), entry
) < 0)
743 entry
->skb
->mark
= ntohl(nla_get_be32(nfqa
[NFQA_MARK
]));
745 nf_reinject(entry
, verdict
);
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}
static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};
static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        return nf_register_queue_handler(ntohs(cmd->pf),
                                                         &nfqh);
                case NFQNL_CFG_CMD_PF_UNBIND:
                        return nf_unregister_queue_handler(ntohs(cmd->pf),
                                                           &nfqh);
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                               ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
        [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_batch_policy },
};
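/*
 * Editor's note: the .call_rcu handlers are invoked by nfnetlink under
 * rcu_read_lock() and therefore must not sleep; only NFQNL_MSG_CONFIG
 * uses plain .call, because PF (un)bind may sleep (see the comment in
 * nfqnl_recv_config()).
 */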
static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};
#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}
static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}
static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;

        head = get_first(seq);
        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}
static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}
static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}
static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
}
static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};
static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                                sizeof(struct iter_state));
}
static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */
static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops))
                goto cleanup_subsys;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}
static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);
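/*
 * Editor's note, userspace sketch (illustrative, not part of this module):
 * a minimal consumer using libnetfilter_queue, assuming a rule such as
 * "iptables -A INPUT -j NFQUEUE --queue-num 0" is in place and that cb()
 * is a user-supplied callback which issues nfq_set_verdict().
 *
 *      struct nfq_handle *h = nfq_open();
 *      struct nfq_q_handle *qh;
 *
 *      nfq_unbind_pf(h, AF_INET);
 *      nfq_bind_pf(h, AF_INET);                     // NFQNL_CFG_CMD_PF_BIND
 *      qh = nfq_create_queue(h, 0, &cb, NULL);      // NFQNL_CFG_CMD_BIND
 *      nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff); // NFQA_CFG_PARAMS
 *
 *      for (;;) {
 *              char buf[65536];
 *              int rv = recv(nfq_fd(h), buf, sizeof(buf), 0);
 *              if (rv >= 0)
 *                      nfq_handle_packet(h, buf, rv); // runs cb()
 *      }
 */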