/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
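/*
 * Userspace talks to this module over a NETLINK_FIREWALL netlink socket:
 * it first sends an IPQM_MODE request selecting the copy mode, then reads
 * IPQM_PACKET messages and answers each one with an IPQM_VERDICT carrying
 * the packet_id and the desired verdict.  A minimal, illustrative sketch
 * of the mode request (userspace side; it assumes the message layout from
 * <linux/netfilter_ipv4/ip_queue.h>, needs CAP_NET_ADMIN, and real
 * applications normally use libipq instead):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		ipq_peer_msg_t  pm;
 *	} req;
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_FIREWALL);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nlh.nlmsg_len     = sizeof(req);
 *	req.nlh.nlmsg_flags   = NLM_F_REQUEST;
 *	req.nlh.nlmsg_type    = IPQM_MODE;
 *	req.nlh.nlmsg_pid     = getpid();
 *	req.pm.msg.mode.value = IPQ_COPY_PACKET;
 *	req.pm.msg.mode.range = 0;
 *	send(fd, &req, sizeof(req), 0);
 */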
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/net.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/route.h>
#include <net/netfilter/nf_queue.h>
#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);
static inline void
__ipq_enqueue_entry(struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue_list);
	queue_total++;
}
static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
static void __ipq_reset(void)
{
	peer_pid = 0;
	net_disable_timestamp();
	__ipq_set_mode(IPQ_COPY_NONE, 0);
	__ipq_flush(NULL, 0);
}
static struct nf_queue_entry *
ipq_find_dequeue_entry(unsigned long id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue_lock);

	list_for_each_entry(i, &queue_list, list) {
		if ((unsigned long)i == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue_total--;
	}

	spin_unlock_bh(&queue_lock);
	return entry;
}
static void
__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
}
static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
	spin_lock_bh(&queue_lock);
	__ipq_flush(cmpfn, data);
	spin_unlock_bh(&queue_lock);
}
/* Build an IPQM_PACKET netlink message describing a queued packet. */
static struct sk_buff *
ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size = 0;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct ipq_packet_msg *pmsg;
	struct nlmsghdr *nlh;
	struct timeval tv;

	switch (ACCESS_ONCE(copy_mode)) {
	case IPQ_COPY_META:
	case IPQ_COPY_NONE:
		size = NLMSG_SPACE(sizeof(*pmsg));
		break;

	case IPQ_COPY_PACKET:
		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
		    (*errp = skb_checksum_help(entry->skb)))
			return NULL;

		data_len = ACCESS_ONCE(copy_range);
		if (data_len == 0 || data_len > entry->skb->len)
			data_len = entry->skb->len;

		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		break;

	default:
		*errp = -EINVAL;
		return NULL;
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
	pmsg = NLMSG_DATA(nlh);
	memset(pmsg, 0, sizeof(*pmsg));

	pmsg->packet_id       = (unsigned long)entry;
	pmsg->data_len        = data_len;
	tv = ktime_to_timeval(entry->skb->tstamp);
	pmsg->timestamp_sec   = tv.tv_sec;
	pmsg->timestamp_usec  = tv.tv_usec;
	pmsg->mark            = entry->skb->mark;
	pmsg->hook            = entry->hook;
	pmsg->hw_protocol     = entry->skb->protocol;

	if (entry->indev)
		strcpy(pmsg->indev_name, entry->indev->name);
	else
		pmsg->indev_name[0] = '\0';

	if (entry->outdev)
		strcpy(pmsg->outdev_name, entry->outdev->name);
	else
		pmsg->outdev_name[0] = '\0';

	if (entry->indev && entry->skb->dev &&
	    entry->skb->mac_header != entry->skb->network_header) {
		pmsg->hw_type = entry->skb->dev->type;
		pmsg->hw_addrlen = dev_parse_header(entry->skb,
						    pmsg->hw_addr);
	}

	if (data_len)
		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
			BUG();

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
	kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "ip_queue: error creating packet message\n");
	return NULL;
}
/*
 * Queue handler callback: build a netlink message for the queued packet
 * and unicast it to the userspace peer.
 */
static int
ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	int status = -EINVAL;
	struct sk_buff *nskb;

	if (copy_mode == IPQ_COPY_NONE)
		return -EAGAIN;

	nskb = ipq_build_packet_message(entry, &status);
	if (nskb == NULL)
		return status;

	spin_lock_bh(&queue_lock);

	if (!peer_pid)
		goto err_out_free_nskb;

	if (queue_total >= queue_maxlen) {
		queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "ip_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n", queue_total,
			       queue_dropped);
		goto err_out_free_nskb;
	}

	/* netlink_unicast will either free the nskb or attach it to a socket */
	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue_user_dropped++;
		goto err_out_unlock;
	}

	__ipq_enqueue_entry(entry);

	spin_unlock_bh(&queue_lock);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue_lock);
	return status;
}
/* Replace the queued packet's payload with data supplied by userspace. */
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
	int diff;
	struct iphdr *user_iph = (struct iphdr *)v->payload;
	struct sk_buff *nskb;

	if (v->data_len < sizeof(*user_iph))
		return 0;
	diff = v->data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, v->data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (v->data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "ip_queue: error "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, v->data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
	e->skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
	struct nf_queue_entry *entry;

	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
		return -EINVAL;

	entry = ipq_find_dequeue_entry(vmsg->id);
	if (entry == NULL)
		return -ENOENT;
	else {
		int verdict = vmsg->value;

		if (vmsg->data_len && vmsg->data_len == len)
			if (ipq_mangle_ipv4(vmsg, entry) < 0)
				verdict = NF_DROP;

		nf_reinject(entry, verdict);
		return 0;
	}
}
static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue_lock);
	status = __ipq_set_mode(mode, range);
	spin_unlock_bh(&queue_lock);
	return status;
}
/* Dispatch an IPQM_MODE or IPQM_VERDICT request from the userspace peer. */
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
		 unsigned char type, unsigned int len)
{
	int status = 0;

	if (len < sizeof(*pmsg))
		return -EINVAL;

	switch (type) {
	case IPQM_MODE:
		status = ipq_set_mode(pmsg->msg.mode.value,
				      pmsg->msg.mode.range);
		break;

	case IPQM_VERDICT:
		status = ipq_set_verdict(&pmsg->msg.verdict,
					 len - sizeof(*pmsg));
		break;

	default:
		status = -EINVAL;
	}
	return status;
}
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}
static void
ipq_dev_drop(int ifindex)
{
	ipq_flush(dev_cmp, ifindex);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
/* Validate and process one netlink message from the userspace peer. */
static void
__ipq_rcv_skb(struct sk_buff *skb)
{
	int status, type, pid, flags;
	unsigned int nlmsglen, skblen;
	struct nlmsghdr *nlh;
	bool enable_timestamp = false;

	skblen = skb->len;
	if (skblen < sizeof(*nlh))
		return;

	nlh = nlmsg_hdr(skb);
	nlmsglen = nlh->nlmsg_len;
	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
		return;

	pid = nlh->nlmsg_pid;
	flags = nlh->nlmsg_flags;

	if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
		RCV_SKB_FAIL(-EINVAL);

	if (flags & MSG_TRUNC)
		RCV_SKB_FAIL(-ECOMM);

	type = nlh->nlmsg_type;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		RCV_SKB_FAIL(-EINVAL);

	if (type <= IPQM_BASE)
		return;

	if (!capable(CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	spin_lock_bh(&queue_lock);

	if (peer_pid) {
		if (peer_pid != pid) {
			spin_unlock_bh(&queue_lock);
			RCV_SKB_FAIL(-EBUSY);
		}
	} else {
		enable_timestamp = true;
		peer_pid = pid;
	}

	spin_unlock_bh(&queue_lock);
	if (enable_timestamp)
		net_enable_timestamp();

	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
				  nlmsglen - NLMSG_LENGTH(0));
	if (status < 0)
		RCV_SKB_FAIL(status);

	if (flags & NLM_F_ACK)
		netlink_ack(skb, nlh, 0);
}
static void
ipq_rcv_skb(struct sk_buff *skb)
{
	mutex_lock(&ipqnl_mutex);
	__ipq_rcv_skb(skb);
	mutex_unlock(&ipqnl_mutex);
}
static int
ipq_rcv_dev_event(struct notifier_block *this,
		  unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		ipq_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}
static struct notifier_block ipq_dev_notifier = {
	.notifier_call	= ipq_rcv_dev_event,
};
/* Reset queue state when the userspace peer closes its netlink socket. */
static int
ipq_rcv_nl_event(struct notifier_block *this,
		 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
		spin_lock_bh(&queue_lock);
		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
			__ipq_reset();
		spin_unlock_bh(&queue_lock);
	}
	return NOTIFY_DONE;
}
static struct notifier_block ipq_nl_notifier = {
	.notifier_call	= ipq_rcv_nl_event,
};
static struct ctl_table_header *ipq_sysctl_header;
static ctl_table ipq_table[] = {
	{
		.procname	= NET_IPQ_QMAX_NAME,
		.data		= &queue_maxlen,
		.maxlen		= sizeof(queue_maxlen),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ }
};
#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
{
	spin_lock_bh(&queue_lock);

	seq_printf(m,
		   "Peer PID          : %d\n"
		   "Copy mode         : %hu\n"
		   "Copy range        : %u\n"
		   "Queue length      : %u\n"
		   "Queue max. length : %u\n"
		   "Queue dropped     : %u\n"
		   "Netlink dropped   : %u\n",
		   peer_pid, copy_mode, copy_range,
		   queue_total, queue_maxlen,
		   queue_dropped, queue_user_dropped);

	spin_unlock_bh(&queue_lock);
	return 0;
}
static int ip_queue_open(struct inode *inode, struct file *file)
{
	return single_open(file, ip_queue_show, NULL);
}
static const struct file_operations ip_queue_proc_fops = {
	.open		= ip_queue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
#endif /* CONFIG_PROC_FS */
static const struct nf_queue_handler nfqh = {
	.name	= "ip_queue",
	.outfn	= &ipq_enqueue_packet,
};
static int __init ip_queue_init(void)
{
	int status = -ENOMEM;
	struct proc_dir_entry *proc __maybe_unused;

	netlink_register_notifier(&ipq_nl_notifier);
	ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
				      ipq_rcv_skb, NULL, THIS_MODULE);
	if (ipqnl == NULL) {
		printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
			   &ip_queue_proc_fops);
	if (!proc) {
		printk(KERN_ERR "ip_queue: failed to create proc entry\n");
		goto cleanup_ipqnl;
	}
#endif
	register_netdevice_notifier(&ipq_dev_notifier);
#ifdef CONFIG_SYSCTL
	ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
#endif
	status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
	if (status < 0) {
		printk(KERN_ERR "ip_queue: failed to register queue handler\n");
		goto cleanup_sysctl;
	}
	return status;

cleanup_sysctl:
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(ipq_sysctl_header);
#endif
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

cleanup_ipqnl: __maybe_unused
	netlink_kernel_release(ipqnl);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
	netlink_unregister_notifier(&ipq_nl_notifier);
	return status;
}
static void __exit ip_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);

	synchronize_net();
	ipq_flush(NULL, 0);

#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(ipq_sysctl_header);
#endif
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

	netlink_kernel_release(ipqnl);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

	netlink_unregister_notifier(&ipq_nl_notifier);
}
MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);

module_init(ip_queue_init);
module_exit(ip_queue_fini);