// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/list.h>
#include <linux/cgroup-defs.h>
#include <net/gso.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that. Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
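
/* Worked example for the limit above: nla_len is a 16-bit field, so one
 * attribute (header plus payload) is at most 0xffff = 65535 bytes.  With
 * NLA_HDRLEN = NLA_ALIGN(sizeof(struct nlattr)) = 4, that leaves
 * 65535 - 4 = 65531 bytes of packet payload, the figure quoted above.
 */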

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	u32 peer_portid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
	spinlock_t	lock	____cacheline_aligned_in_smp;
	unsigned int	queue_total;
	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS	16

struct nfnl_queue_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
	return net_generic(net, nfnl_queue_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}
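
/* Example: queue_num 0x1234 gives (0x12 ^ 0x34) % 16 = 0x26 % 16 = 6, so
 * both bytes of the 16-bit queue number contribute to the bucket index.
 */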

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
	struct hlist_head *head;
	struct nfqnl_instance *inst;

	head = &q->instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&q->instances_lock);
	if (instance_lookup(q, queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = NFQNL_MAX_COPY_RANGE;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

	spin_unlock(&q->instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&q->instances_lock);
	return ERR_PTR(err);
}
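
/* Note that instance_create() allocates with GFP_ATOMIC: it runs with
 * q->instances_lock held, so it must not sleep while setting up the queue.
 */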

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
	spin_lock(&q->instances_lock);
	__instance_destroy(inst);
	spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       const struct nf_hook_entries *hooks,
			       unsigned int *index)
{
	const struct nf_hook_entry *hook;
	unsigned int verdict, i = *index;

	while (i < hooks->num_hook_entries) {
		hook = &hooks->hooks[i];
repeat:
		verdict = nf_hook_entry_hookfn(hook, skb, state);
		if (verdict != NF_ACCEPT) {
			*index = i;
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		i++;
	}
	return NF_ACCEPT;
}
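
/* nf_iterate() resumes hook traversal at *index and, on a non-accept
 * verdict, stores the position of the deciding hook back through *index,
 * so a later reinject can continue from exactly that point in the chain.
 */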

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
	switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
	case NFPROTO_IPV4:
		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
	case NFPROTO_IPV6:
		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
	}

	return NULL;
}

static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
{
#ifdef CONFIG_INET
	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		if (!(iph->tos == rt_info->tos &&
		      skb->mark == rt_info->mark &&
		      iph->daddr == rt_info->daddr &&
		      iph->saddr == rt_info->saddr))
			return ip_route_me_harder(entry->state.net, entry->state.sk,
						  skb, RTN_UNSPEC);
	}
#endif
	return 0;
}

static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
{
	const struct nf_ipv6_ops *v6ops;
	int ret = 0;

	switch (entry->state.pf) {
	case AF_INET:
		ret = nf_ip_reroute(skb, entry);
		break;
	case AF_INET6:
		v6ops = rcu_dereference(nf_ipv6_ops);
		if (v6ops)
			ret = v6ops->reroute(skb, entry);
		break;
	}
	return ret;
}

/* caller must hold rcu read-side lock */
static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_hook_entry *hook_entry;
	const struct nf_hook_entries *hooks;
	struct sk_buff *skb = entry->skb;
	const struct net *net;
	unsigned int i;
	int err;
	u8 pf;

	net = entry->state.net;
	pf = entry->state.pf;

	hooks = nf_hook_entries_head(net, pf, entry->state.hook);

	i = entry->hook_index;
	if (!hooks || i >= hooks->num_hook_entries) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
		nf_queue_entry_free(entry);
		return;
	}

	hook_entry = &hooks->hooks[i];

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		if (nf_reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
next_hook:
		++i;
		verdict = nf_iterate(skb, &entry->state, hooks, &i);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, i, verdict);
		if (err == 1)
			goto next_hook;
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	nf_queue_entry_free(entry);
}
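
/* Disposition of the verdicts above: NF_ACCEPT and NF_STOP hand the skb to
 * the okfn for normal delivery, NF_QUEUE re-queues it at the next hook,
 * NF_STOLEN means someone else now owns the skb, and anything else
 * (including NF_DROP) frees it.  The queue entry is released in all cases.
 */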

static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_ct_hook *ct_hook;

	if (verdict == NF_ACCEPT ||
	    verdict == NF_REPEAT ||
	    verdict == NF_STOP) {
		unsigned int ct_verdict = verdict;

		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook)
			ct_verdict = ct_hook->update(entry->state.net, entry->skb);
		rcu_read_unlock();

		switch (ct_verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			/* follow userspace verdict, could be REPEAT */
			break;
		case NF_STOLEN:
			nf_queue_entry_free(entry);
			return;
		default:
			verdict = ct_verdict & NF_VERDICT_MASK;
			break;
		}
	}
	nf_reinject(entry, verdict);
}
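
/* For accept-like verdicts the conntrack core (via nf_ct_hook->update) may
 * overrule userspace, e.g. when a mangled payload no longer fits the
 * connection's state; NF_STOLEN from that hook ends processing here, any
 * other non-accept result replaces the verdict before reinjection.
 */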

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nfqnl_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
		      bool csum_verify)
{
	u32 flags = 0;

	if (packet->ip_summed == CHECKSUM_PARTIAL)
		flags = NFQA_SKB_CSUMNOTREADY;
	else if (csum_verify)
		flags = NFQA_SKB_CSUM_NOTVERIFIED;

	if (skb_is_gso(packet))
		flags |= NFQA_SKB_GSO;

	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}
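
/* NFQA_SKB_INFO tells userspace how much to trust the payload it sees:
 * CSUMNOTREADY flags checksums not yet computed (CHECKSUM_PARTIAL),
 * CSUM_NOTVERIFIED flags checksums the kernel has not validated, and
 * GSO flags a still-unsegmented super-packet (NFQA_CFG_F_GSO mode).
 */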

static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
	const struct cred *cred;

	if (!sk_fullsock(sk))
		return 0;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		cred = sk->sk_socket->file->f_cred;
		if (nla_put_be32(skb, NFQA_UID,
		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFQA_GID,
		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
			goto nla_put_failure;
	}
	read_unlock_bh(&sk->sk_callback_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&sk->sk_callback_lock);
	return -1;
}

static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk)
{
#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
	if (sk && sk_fullsock(sk)) {
		u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);

		if (classid && nla_put_be32(skb, NFQA_CGROUP_CLASSID, htonl(classid)))
			return -1;
	}
#endif
	return 0;
}

static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
	u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
	if (!skb || !sk_fullsock(skb->sk))
		return 0;

	read_lock_bh(&skb->sk->sk_callback_lock);

	if (skb->secmark)
		security_secid_to_secctx(skb->secmark, secdata, &seclen);

	read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
	return seclen;
}

static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
{
	struct sk_buff *entskb = entry->skb;
	u32 nlalen = 0;

	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
		return 0;

	if (skb_vlan_tag_present(entskb))
		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
					 nla_total_size(sizeof(__be16)));

	if (entskb->network_header > entskb->mac_header)
		nlalen += nla_total_size((entskb->network_header -
					  entskb->mac_header));

	return nlalen;
}

static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
{
	struct sk_buff *entskb = entry->skb;

	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
		return 0;

	if (skb_vlan_tag_present(entskb)) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, NFQA_VLAN);
		if (!nest)
			goto nla_put_failure;

		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
			goto nla_put_failure;

		nla_nest_end(skb, nest);
	}

	if (entskb->mac_header < entskb->network_header) {
		int len = (int)(entskb->network_header - entskb->mac_header);

		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

static int nf_queue_checksum_help(struct sk_buff *entskb)
{
	if (skb_csum_is_sctp(entskb))
		return skb_crc32c_csum_help(entskb);

	return skb_checksum_help(entskb);
}
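
/* SCTP carries a CRC32c checksum instead of the 16-bit internet checksum,
 * so it needs skb_crc32c_csum_help(); everything else is finalized with
 * the generic skb_checksum_help() before the payload is copied out.
 */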

static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	size_t size;
	size_t data_len = 0, cap_len = 0;
	unsigned int hlen = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo = 0;
	const struct nfnl_ct_hook *nfnl_ct;
	bool csum_verify;
	char *secdata = NULL;
	u32 seclen = 0;
	ktime_t tstamp;

	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* priority */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
		+ nla_total_size(sizeof(u_int32_t))	/* classid */
#endif
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	tstamp = skb_tstamp_cond(entskb, false);
	if (tstamp)
		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	size += nfqnl_get_bridge_size(entry);

	if (entry->state.hook <= NF_INET_FORWARD ||
	    (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
		csum_verify = !skb_csum_unnecessary(entskb);
	else
		csum_verify = false;

	outdev = entry->state.out;

	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    nf_queue_checksum_help(entskb))
			return NULL;

		data_len = READ_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;

		hlen = skb_zerocopy_headlen(entskb);
		hlen = min_t(unsigned int, hlen, data_len);
		size += sizeof(struct nlattr) + hlen;
		cap_len = entskb->len;
		break;
	}

	nfnl_ct = rcu_dereference(nfnl_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
		if (nfnl_ct != NULL) {
			ct = nf_ct_get(entskb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}
#endif

	if (queue->flags & NFQA_CFG_F_UID_GID) {
		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
	}

	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
		if (seclen)
			size += nla_total_size(seclen);
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		skb_tx_error(entskb);
		goto nlmsg_failure;
	}

	nlh = nfnl_msg_put(skb, 0, 0,
			   nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
			   0, entry->state.pf, NFNETLINK_V0,
			   htons(queue->queue_num));
	if (!nlh) {
		skb_tx_error(entskb);
		kfree_skb(skb);
		goto nlmsg_failure;
	}

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol	= entskb->protocol;
	pmsg->hook		= entry->state.hook;
	*packet_id_ptr		= &pmsg->packet_id;

	indev = entry->state.in;
	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physinif;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physinif = nf_bridge_get_physinif(entskb);
			if (physinif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(physinif)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physoutif;

			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutif = nf_bridge_get_physoutif(entskb);
			if (physoutif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutif)))
				goto nla_put_failure;
		}
#endif
	}

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (entskb->priority &&
	    nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    skb_mac_header_was_set(entskb) &&
	    skb_mac_header_len(entskb) != 0) {
		struct nfqnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (nfqnl_put_bridge(entry, skb) < 0)
		goto nla_put_failure;

	if (entry->state.hook <= NF_INET_FORWARD && tstamp) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(tstamp);

		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (nfqnl_put_sk_classid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
		goto nla_put_failure;

	if (cap_len > data_len &&
	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;

		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
			goto nla_put_failure;

		nla = skb_put(skb, sizeof(*nla));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_zerocopy(skb, entskb, data_len, hlen))
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb->len;
	if (seclen)
		security_release_secctx(secdata, seclen);
	return skb;

nla_put_failure:
	skb_tx_error(entskb);
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
nlmsg_failure:
	if (seclen)
		security_release_secctx(secdata, seclen);
	return NULL;
}

static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
	struct nf_conn *ct = (void *)skb_nfct(entry->skb);
	unsigned long status;
	unsigned int use;

	if (!ct)
		return false;

	status = READ_ONCE(ct->status);
	if ((status & flags) == IPS_DYING)
		return true;

	if (status & IPS_CONFIRMED)
		return false;

	/* in some cases skb_clone() can occur after initial conntrack
	 * pickup, but conntrack assumes exclusive skb->_nfct ownership for
	 * unconfirmed entries.
	 *
	 * This happens for br_netfilter and with ip multicast routing.
	 * This can't be solved with serialization here because one clone
	 * could have been queued for local delivery.
	 */
	use = refcount_read(&ct->ct_general.use);
	if (likely(use == 1))
		return false;

	/* Can't decrement further? Exclusive ownership. */
	if (!refcount_dec_not_one(&ct->ct_general.use))
		return false;

	skb_set_nfct(entry->skb, 0);
	/* No nf_ct_put(): we already decremented .use and it cannot
	 * drop down to 0.
	 */
	return true;
#endif
	return false;
}
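
/* The refcount game above is the tie-breaker between clones that share an
 * unconfirmed conntrack entry: a clone that can still decrement the
 * refcount detaches its skb->_nfct and is dropped, while the clone that
 * finds the count at one keeps exclusive ownership and may be queued.
 */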

static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
		       struct nf_queue_entry *entry)
{
	struct sk_buff *nskb;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (nf_ct_drop_unconfirmed(entry))
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
	if (err < 0) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_user_dropped++;
		}
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nfqnl_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}

static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);

	if (!entry)
		return NULL;

	if (nf_queue_entry_get_refs(entry))
		return entry;

	kfree(entry);
	return NULL;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (nf_bridge_info_get(skb))
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (nf_bridge_info_get(skb))
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
			   struct sk_buff *skb, struct nf_queue_entry *entry)
{
	int ret = -ENOMEM;
	struct nf_queue_entry *entry_seg;

	nf_bridge_adjust_segmented_data(skb);

	if (skb->next == NULL) { /* last packet, no need to copy entry */
		struct sk_buff *gso_skb = entry->skb;

		entry->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry);
		if (ret)
			entry->skb = gso_skb;
		return ret;
	}

	skb_mark_not_on_list(skb);

	entry_seg = nf_queue_entry_dup(entry);
	if (entry_seg) {
		entry_seg->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
		if (ret)
			nf_queue_entry_free(entry_seg);
	}
	return ret;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	unsigned int queued;
	struct nfqnl_instance *queue;
	struct sk_buff *skb, *segs, *nskb;
	int err = -ENOBUFS;
	struct net *net = entry->state.net;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	/* rcu_read_lock()ed by nf_hook_thresh */
	queue = instance_lookup(q, queuenum);
	if (!queue)
		return -ESRCH;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		return -EINVAL;

	skb = entry->skb;

	switch (entry->state.pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	if (!skb_is_gso(skb) || ((queue->flags & NFQA_CFG_F_GSO) && !skb_is_gso_sctp(skb)))
		return __nfqnl_enqueue_packet(net, queue, entry);

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue. For instance, callers rely on -ESRCH to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR_OR_NULL(segs))
		goto out_err;

	queued = 0;
	err = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		if (err == 0)
			err = __nfqnl_enqueue_packet_gso(net, queue,
							 segs, entry);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
	}

	if (queued) {
		if (err) /* some segments are already queued */
			nf_queue_entry_free(entry);
		kfree_skb(skb);
		return 0;
	}
out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}

static int
nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		unsigned int min_len = skb_transport_offset(e->skb);

		if (data_len < min_len)
			return -EINVAL;

		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb)
				return -ENOMEM;
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (skb_ensure_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}
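
/* Example: a verdict carrying a 60-byte NFQA_PAYLOAD for a 52-byte skb
 * yields diff = 8, so eight bytes are appended (reallocating first if the
 * tailroom is short), then the whole payload overwrites the old data and
 * ip_summed is reset to CHECKSUM_NONE because the old checksum is stale.
 */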

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
			queue->copy_range = NFQNL_MAX_COPY_RANGE;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	int physinif, physoutif;

	physinif = nf_bridge_get_physinif(entry->skb);
	physoutif = nf_bridge_get_physoutif(entry->skb);

	if (physinif == ifindex || physoutif == ifindex)
		return 1;
#endif
	if (entry->state.in)
		if (entry->state.in->ifindex == ifindex)
			return 1;
	if (entry->state.out)
		if (entry->state.out->ifindex == ifindex)
			return 1;

	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
	int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static void nfqnl_nf_hook_drop(struct net *net)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int i;

	/* This function is also called on net namespace error unwind,
	 * when pernet_ops->init() failed and ->exit() functions of the
	 * previous pernet_ops gets called.
	 *
	 * This may result in a call to nfqnl_nf_hook_drop() before
	 * struct nfnl_queue_net was allocated.
	 */
	if (!q)
		return;

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, NULL, 0);
	}
}

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&q->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &q->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock(&q->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
	[NFQA_EXP]		= { .type = NLA_UNSPEC },
	[NFQA_VLAN]		= { .type = NLA_NESTED },
	[NFQA_PRIORITY]		= { .type = NLA_U32 },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PRIORITY]		= { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(q, queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr *
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}
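
/* The cast makes the comparison wrap-safe: for id = 1 and max = 0xffffffff,
 * (int)(1 - 0xffffffff) == 2 > 0, so an id that just wrapped past zero is
 * still considered "after" the batch maximum.
 */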

static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
				    const struct nfnl_info *info,
				    const struct nlattr * const nfqa[])
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
	u16 queue_num = ntohs(info->nfmsg->res_id);
	struct nf_queue_entry *entry, *tmp;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict, maxid;
	LIST_HEAD(batch_list);

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

		if (nfqa[NFQA_PRIORITY])
			entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));

		nfqnl_reinject(entry, verdict);
	}
	return 0;
}
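
/* A batch verdict applies one verdict to every entry whose id is not after
 * vhdr->id: candidates are unlinked under the queue lock first and only
 * reinjected afterwards, so nfqnl_reinject() never runs with the lock held.
 */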

static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
				      const struct nlmsghdr *nlh,
				      const struct nlattr * const nfqa[],
				      struct nf_queue_entry *entry,
				      enum ip_conntrack_info *ctinfo)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct nf_conn *ct;

	ct = nf_ct_get(entry->skb, ctinfo);
	if (ct == NULL)
		return NULL;

	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
		return NULL;

	if (nfqa[NFQA_EXP])
		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
				       NETLINK_CB(entry->skb).portid,
				       nlmsg_report(nlh));
	return ct;
#else
	return NULL;
#endif
}

static int nfqa_parse_bridge(struct nf_queue_entry *entry,
			     const struct nlattr * const nfqa[])
{
	if (nfqa[NFQA_VLAN]) {
		struct nlattr *tb[NFQA_VLAN_MAX + 1];
		int err;

		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
						  nfqa[NFQA_VLAN],
						  nfqa_vlan_policy, NULL);
		if (err < 0)
			return err;

		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
			return -EINVAL;

		__vlan_hwaccel_put_tag(entry->skb,
			nla_get_be16(tb[NFQA_VLAN_PROTO]),
			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
	}

	if (nfqa[NFQA_L2HDR]) {
		int mac_header_len = entry->skb->network_header -
			entry->skb->mac_header;

		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
			return -EINVAL;
		else if (mac_header_len > 0)
			memcpy(skb_mac_header(entry->skb),
			       nla_data(nfqa[NFQA_L2HDR]),
			       mac_header_len);
	}

	return 0;
}

static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
			      const struct nlattr * const nfqa[])
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
	const struct nfnl_ct_hook *nfnl_ct;
	struct nfqnl_msg_verdict_hdr *vhdr;
	enum ip_conntrack_info ctinfo;
	struct nfqnl_instance *queue;
	struct nf_queue_entry *entry;
	struct nf_conn *ct = NULL;
	unsigned int verdict;
	int err;

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	/* rcu lock already held from nfnl->call_rcu. */
	nfnl_ct = rcu_dereference(nfnl_ct_hook);

	if (nfqa[NFQA_CT]) {
		if (nfnl_ct != NULL)
			ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
					    &ctinfo);
	}

	if (entry->state.pf == PF_BRIDGE) {
		err = nfqa_parse_bridge(entry, nfqa);
		if (err < 0)
			return err;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct && diff)
			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	if (nfqa[NFQA_PRIORITY])
		entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));

	nfqnl_reinject(entry, verdict);
	return 0;
}

static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
			     const struct nlattr * const cda[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
};

static const struct nf_queue_handler nfqh = {
	.outfn		= nfqnl_enqueue_packet,
	.nf_hook_drop	= nfqnl_nf_hook_drop,
};

static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
			     const struct nlattr * const nfqa[])
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
	struct nfqnl_msg_config_cmd *cmd = NULL;
	struct nfqnl_instance *queue;
	__u32 flags = 0, mask = 0;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Obsolete commands without queue context */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND: return 0;
		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
		}
	}

	/* Check if we support these flags in first place, dependencies should
	 * be there too not to break atomicity.
	 */
	if (nfqa[NFQA_CFG_FLAGS]) {
		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			return -EINVAL;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX)
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
		if (flags & mask & NFQA_CFG_F_SECCTX)
			return -EOPNOTSUPP;
#endif
		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
		    !rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
			nfnl_unlock(NFNL_SUBSYS_QUEUE);
			request_module("ip_conntrack_netlink");
			nfnl_lock(NFNL_SUBSYS_QUEUE);
			if (rcu_access_pointer(nfnl_ct_hook))
				return -EAGAIN;
#endif
			return -EOPNOTSUPP;
		}
	}

	rcu_read_lock();
	queue = instance_lookup(q, queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(q, queue_num,
						NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(q, queue);
			goto err_out_unlock;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			goto err_out_unlock;
		}
	}

	if (!queue) {
		ret = -ENODEV;
		goto err_out_unlock;
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params =
			nla_data(nfqa[NFQA_CFG_PARAMS]);

		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);

		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= {
		.call		= nfqnl_recv_unsupp,
		.type		= NFNL_CB_RCU,
		.attr_count	= NFQA_MAX,
	},
	[NFQNL_MSG_VERDICT]	= {
		.call		= nfqnl_recv_verdict,
		.type		= NFNL_CB_RCU,
		.attr_count	= NFQA_MAX,
		.policy		= nfqa_verdict_policy
	},
	[NFQNL_MSG_CONFIG]	= {
		.call		= nfqnl_recv_config,
		.type		= NFNL_CB_MUTEX,
		.attr_count	= NFQA_CFG_MAX,
		.policy		= nfqa_cfg_policy
	},
	[NFQNL_MSG_VERDICT_BATCH] = {
		.call		= nfqnl_recv_verdict_batch,
		.type		= NFNL_CB_RCU,
		.attr_count	= NFQA_MAX,
		.policy		= nfqa_verdict_batch_policy
	},
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;
	struct net *net;
	struct nfnl_queue_net *q;

	if (!st)
		return NULL;

	net = seq_file_net(seq);
	q = nfnl_queue_pernet(net);
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&q->instance_table[st->bucket]))
			return q->instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	h = h->next;
	while (!h) {
		struct nfnl_queue_net *q;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		q = nfnl_queue_pernet(net);
		h = q->instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
	return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
		   inst->queue_num,
		   inst->peer_portid, inst->queue_total,
		   inst->copy_mode, inst->copy_range,
		   inst->queue_dropped, inst->queue_user_dropped,
		   inst->id_sequence, 1);
	return 0;
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};
#endif /* PROC_FS */
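
/* Column layout of /proc/net/netfilter/nfnetlink_queue, one instance per
 * line: queue number, peer portid, queued packets, copy mode, copy range,
 * queue-full drops, userspace drops, id sequence, and a trailing constant
 * 1 kept for backwards compatibility.
 */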

static int __net_init nfnl_queue_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&q->instance_table[i]);

	spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
			     &nfqnl_seq_ops, sizeof(struct iter_state)))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	unsigned int i;

#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
	for (i = 0; i < INSTANCE_BUCKETS; i++)
		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init		= nfnl_queue_net_init,
	.exit		= nfnl_queue_net_exit,
	.id		= &nfnl_queue_net_id,
	.size		= sizeof(struct nfnl_queue_net),
};

static int __init nfnetlink_queue_init(void)
{
	int status;

	status = register_pernet_subsys(&nfnl_queue_net_ops);
	if (status < 0) {
		pr_err("failed to register pernet ops\n");
		goto out;
	}

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		pr_err("failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = register_netdevice_notifier(&nfqnl_dev_notifier);
	if (status < 0) {
		pr_err("failed to register netdevice notifier\n");
		goto cleanup_netlink_subsys;
	}

	nf_register_queue_handler(&nfqh);

	return status;

cleanup_netlink_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handler();
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);
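
/* Rough userspace counterpart, sketched with the libnetfilter_queue API
 * (library names shown from memory, only as an illustration of the message
 * flow this module implements):
 *
 *	struct nfq_handle *h = nfq_open();
 *	struct nfq_q_handle *qh = nfq_create_queue(h, 0, cb, NULL);
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *	// in cb: nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *
 * nfq_create_queue() corresponds to NFQNL_CFG_CMD_BIND handled by
 * nfqnl_recv_config(), nfq_set_mode() to NFQA_CFG_PARAMS, and
 * nfq_set_verdict() to NFQNL_MSG_VERDICT handled by nfqnl_recv_verdict().
 */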