/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick McHardy <kaber@trash.net>
 * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
MODULE_LICENSE("GPL");

static char __initdata version[] = "0.93";
static inline int
ctnetlink_dump_tuples_proto(struct sk_buff *skb,
			    const struct nf_conntrack_tuple *tuple,
			    struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_tuples_ip(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 struct nf_conntrack_l3proto *l3proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	if (likely(l3proto->tuple_to_nlattr))
		ret = l3proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}
static int
ctnetlink_dump_tuples(struct sk_buff *skb,
		      const struct nf_conntrack_tuple *tuple)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;

	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);

	if (unlikely(ret < 0))
		return ret;

	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
	ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);

	return ret;
}
static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
	long timeout = (ct->timeout.expires - jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
	if (!nest_helper)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	return 0;

nla_put_failure:
	return -1;
}
static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
			enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;
	const struct nf_conn_counter *acct;

	acct = nf_conn_acct_find(ct);
	if (!acct)
		return 0;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
		     cpu_to_be64(acct[dir].packets));
	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
		     cpu_to_be64(acct[dir].bytes));

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}
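/* The start/stop values exported below come from the conntrack timestamp
 * extension (CONFIG_NF_CONNTRACK_TIMESTAMP): wall-clock times in
 * nanoseconds, with stop left at 0 while the conntrack is still alive. */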
static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
	if (tstamp->stop != 0) {
		NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
			     cpu_to_be64(tstamp->stop));
	}
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static inline int
ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
	if (!nest_secctx)
		goto nla_put_failure;

	NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

static inline int
ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
#ifdef CONFIG_NF_NAT_NEEDED
static int
dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
		     htonl(natseq->correction_pos));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
		     htonl(natseq->offset_before));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
		     htonl(natseq->offset_after));

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_nat_seq *natseq;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
		return 0;

	natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
		return -1;

	natseq = &nat->seq[IP_CT_DIR_REPLY];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
		return -1;

	return 0;
}
#else
#define ctnetlink_dump_nat_seq_adj(a, b) (0)
#endif
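/* CTA_ID is simply the kernel address of the conntrack entry truncated to
 * 32 bits; user space treats it as an opaque handle to disambiguate
 * entries. */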
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
	return 0;

nla_put_failure:
	return -1;
}
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
		    int event, struct nf_conn *ct)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version      = NFNETLINK_V0;
	nfmsg->res_id	    = 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0 ||
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	size_t len = 0;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	len += l3proto->nla_size;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	len += l4proto->nla_size;
	rcu_read_unlock();

	return len;
}
static inline size_t
ctnetlink_counters_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}
static inline int
ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}
static inline size_t
ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
#else
	return 0;
#endif
}
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_counters_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}
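/* Event path: called from the conntrack event cache on state changes.
 * Returning -ENOBUFS tells the event core that delivery failed, so that
 * reliable event delivery (when enabled) can retry the notification. */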
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (nf_ct_is_untracked(ct))
		return 0;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version	= NFNETLINK_V0;
	nfmsg->res_id	= 0;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_NATSEQADJ) &&
		    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->pid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
static int ctnetlink_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_put((struct nf_conn *)cb->args[1]);
	return 0;
}
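/* Dump callback: cb->args[0] is the current hash bucket and cb->args[1]
 * holds a reference to the last conntrack that did not fit into the skb,
 * so the next invocation can resume exactly where this one stopped. */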
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;

	spin_lock_bh(&nf_conntrack_lock);
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
					   hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				continue;
			if (cb->args[1]) {
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
						cb->nlh->nlmsg_seq,
						IPCTNL_MSG_CT_NEW, ct) < 0) {
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				goto out;
			}
			if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
						IPCTNL_MSG_CT_GET_CTRZERO) {
				struct nf_conn_counter *acct;

				acct = nf_conn_acct_find(ct);
				if (acct)
					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
			}
		}
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	spin_unlock_bh(&nf_conntrack_lock);
	if (last)
		nf_ct_put(last);

	return skb->len;
}
static inline int
ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	struct nf_conntrack_l3proto *l3proto;
	int ret = 0;

	nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);

	if (likely(l3proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_IP_MAX,
					  l3proto->nla_policy);
		if (ret == 0)
			ret = l3proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}
static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};
static inline int
ctnetlink_parse_tuple_proto(struct nlattr *attr,
			    struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_PROTO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
	if (ret < 0)
		return ret;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;
	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_PROTO_MAX,
					  l4proto->nla_policy);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
};
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple,
		      enum ctattr_type type, u_int8_t l3num)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);

	if (!tb[CTA_TUPLE_IP])
		return -EINVAL;

	tuple->src.l3num = l3num;

	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
	if (err < 0)
		return err;

	if (!tb[CTA_TUPLE_PROTO])
		return -EINVAL;

	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
	if (err < 0)
		return err;

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}
static int
ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
{
	if (attr)
#ifdef CONFIG_NF_CONNTRACK_ZONES
		*zone = ntohs(nla_get_be16(attr));
#else
		return -EOPNOTSUPP;
#endif
	else
		*zone = 0;

	return 0;
}
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING },
};
static inline int
ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
{
	struct nlattr *tb[CTA_HELP_MAX+1];

	nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	return 0;
}
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS] 		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
};
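/* DELETE: with CTA_TUPLE_ORIG or CTA_TUPLE_REPLY a single entry is removed;
 * without any tuple the whole conntrack table of this netns is flushed. */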
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(net,
					  NETLINK_CB(skb).pid,
					  nlmsg_report(nlh));
		return 0;
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      NETLINK_CB(skb).pid,
				      nlmsg_report(nlh)) < 0) {
		nf_ct_delete_from_lists(ct);
		/* we failed to report the event, try later */
		nf_ct_insert_dying_list(ct);
		nf_ct_put(ct);
		return 0;
	}

	/* death_by_timeout would report the event again */
	set_bit(IPS_DYING_BIT, &ct->status);

	nf_ct_kill(ct);
	nf_ct_put(ct);

	return 0;
}
static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP)
		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
					  ctnetlink_done, 0);

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	rcu_read_lock();
	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
				  IPCTNL_MSG_CT_NEW, ct);
	rcu_read_unlock();
	nf_ct_put(ct);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}
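/* The NAT setup parser lives in the NAT module; if it is not loaded yet we
 * drop the locks, try request_module() and return -EAGAIN so that nfnetlink
 * replays the whole message once the hook is available. */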
#ifdef CONFIG_NF_NAT_NEEDED
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		/* drop the locks while loading the NAT module, then retry */
		spin_unlock_bh(&nf_conntrack_lock);
		nfnl_unlock();
		if (request_module("nf-nat-ipv4") < 0) {
			nfnl_lock();
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		spin_lock_bh(&nf_conntrack_lock);
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	return parse_nat_setup(ct, manip, attr);
}
#endif
static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	unsigned long d;
	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
	d = ct->status ^ status;

	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
		/* unchangeable */
		return -EBUSY;

	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
		/* SEEN_REPLY bit can only be set */
		return -EBUSY;

	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
		/* ASSURED bit can only be set */
		return -EBUSY;

	/* Be careful here, modifying NAT bits can screw up things,
	 * so don't let users modify them directly if they don't pass
	 * nf_nat_range. */
	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
	return 0;
}
static int
ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_NAT_NEEDED
	int ret;

	if (cda[CTA_NAT_DST]) {
		ret = ctnetlink_parse_nat_setup(ct,
						IP_NAT_MANIP_DST,
						cda[CTA_NAT_DST]);
		if (ret < 0)
			return ret;
	}
	if (cda[CTA_NAT_SRC]) {
		ret = ctnetlink_parse_nat_setup(ct,
						IP_NAT_MANIP_SRC,
						cda[CTA_NAT_SRC]);
		if (ret < 0)
			return ret;
	}
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int
ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		spin_unlock_bh(&nf_conntrack_lock);

		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper)
			return 0;
		if (help->helper)
			return -EBUSY;
		/* need to zero data of old helper */
		memset(&help->help, 0, sizeof(help->help));
	} else {
		/* we cannot set a helper for an existing conntrack */
		return -EOPNOTSUPP;
	}

	RCU_INIT_POINTER(help->helper, helper);

	return 0;
}
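/* Refresh the timeout only if the timer is still pending; if it has already
 * fired, the conntrack is on its way out and -ETIME is reported instead. */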
static inline int
ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
{
	u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	if (!del_timer(&ct->timeout))
		return -ETIME;

	ct->timeout.expires = jiffies + timeout * HZ;
	add_timer(&ct->timeout);

	return 0;
}
static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};
static inline int
ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
{
	const struct nlattr *attr = cda[CTA_PROTOINFO];
	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int err = 0;

	nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);
	rcu_read_unlock();

	return err;
}
#ifdef CONFIG_NF_NAT_NEEDED
static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
	[CTA_NAT_SEQ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_AFTER]	= { .type = NLA_U32 },
};
static inline int
change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
{
	struct nlattr *cda[CTA_NAT_SEQ_MAX+1];

	nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);

	if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
		return -EINVAL;

	natseq->correction_pos =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));

	if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
		return -EINVAL;

	natseq->offset_before =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));

	if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
		return -EINVAL;

	natseq->offset_after =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));

	return 0;
}
static int
ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
			     const struct nlattr * const cda[])
{
	int ret = 0;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!nat)
		return 0;

	if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
		ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
					 cda[CTA_NAT_SEQ_ADJ_ORIG]);
		if (ret < 0)
			return ret;

		ct->status |= IPS_SEQ_ADJUST;
	}

	if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
					 cda[CTA_NAT_SEQ_ADJ_REPLY]);
		if (ret < 0)
			return ret;

		ct->status |= IPS_SEQ_ADJUST;
	}

	return 0;
}
#endif
static int
ctnetlink_change_conntrack(struct nf_conn *ct,
			   const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}
#endif

	return 0;
}
static struct nf_conn *
ctnetlink_create_conntrack(struct net *net, u16 zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;
	struct nf_conn_tstamp *tstamp;

	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

	if (!cda[CTA_TIMEOUT])
		goto err1;
	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;

	rcu_read_lock();
	if (cda[CTA_HELP]) {
		char *helpname = NULL;

		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL) {
			rcu_read_unlock();
#ifdef CONFIG_MODULES
			if (request_module("nfct-helper-%s", helpname) < 0) {
				err = -EOPNOTSUPP;
				goto err1;
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find(helpname,
							    nf_ct_l3num(ct),
							    nf_ct_protonum(ct));
			if (helper) {
				err = -EAGAIN;
				goto err2;
			}
			rcu_read_unlock();
#endif
			err = -EOPNOTSUPP;
			goto err1;
		} else {
			struct nf_conn_help *help;

			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help == NULL) {
				err = -ENOMEM;
				goto err2;
			}

			/* not in hash table yet so not strictly necessary */
			RCU_INIT_POINTER(help->helper, helper);
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
		err = ctnetlink_change_nat(ct, cda);
		if (err < 0)
			goto err2;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
	/* we must add conntrack extensions before confirmation. */
	ct->status |= IPS_CONFIRMED;

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err2;
	}

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			goto err2;
	}
#endif

	memset(&ct->proto, 0, sizeof(ct->proto));
	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err2;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	/* setup master conntrack: this is a confirmed expectation */
	if (cda[CTA_TUPLE_MASTER]) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h;
		struct nf_conn *master_ct;

		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(net, zone, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
		}
		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_to_ns(ktime_get_real());

	add_timer(&ct->timeout);
	nf_conntrack_hash_insert(ct);
	rcu_read_unlock();

	return ct;

err2:
	rcu_read_unlock();
err1:
	nf_conntrack_free(ct);
	return ERR_PTR(err);
}
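/* NEW/SET: if no matching entry exists and NLM_F_CREATE is set, a conntrack
 * is created from the attributes; if one exists and NLM_F_EXCL is absent,
 * the existing entry is updated instead. */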
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	spin_lock_bh(&nf_conntrack_lock);
	if (cda[CTA_TUPLE_ORIG])
		h = __nf_conntrack_find(net, zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = __nf_conntrack_find(net, zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			struct nf_conn *ct;
			enum ip_conntrack_events events;

			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct)) {
				err = PTR_ERR(ct);
				goto out_unlock;
			}
			err = 0;
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;

			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}
	/* implicit 'else' */

	/* We manipulate the conntrack inside the global conntrack table lock,
	 * so there's no need to increase the refcount */
	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		err = ctnetlink_change_conntrack(ct, cda);
		if (err == 0) {
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}

out_unlock:
	spin_unlock_bh(&nf_conntrack_lock);
	return err;
}
/***********************************************************************
 * EXPECT
 ***********************************************************************/
static inline int
ctnetlink_exp_dump_tuple(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 enum ctattr_expect type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, tuple) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
static int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
			const struct nf_conntrack_tuple *tuple,
			const struct nf_conntrack_tuple_mask *mask)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple m;
	struct nlattr *nest_parms;

	memset(&m, 0xFF, sizeof(m));
	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
	m.src.u.all = mask->src.u.all;
	m.dst.protonum = tuple->dst.protonum;

	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);

	if (unlikely(ret < 0))
		goto nla_put_failure;

	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
	ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
	if (unlikely(ret < 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	long timeout = (exp->timeout.expires - jiffies) / HZ;
	struct nf_conn_help *help;

	if (timeout < 0)
		timeout = 0;

	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_tuple(skb,
				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 CTA_EXPECT_MASTER) < 0)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
	NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
	help = nfct_help(master);
	if (help) {
		struct nf_conntrack_helper *helper;

		helper = rcu_dereference(help->helper);
		if (helper)
			NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
	}

	return 0;

nla_put_failure:
	return -1;
}
static int
ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			int event, const struct nf_conntrack_expect *exp)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version	    = NFNETLINK_V0;
	nfmsg->res_id	    = 0;

	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nf_conntrack_expect *exp = item->exp;
	struct net *net = nf_ct_exp_net(exp);
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *skb;
	unsigned int type, group;
	int flags = 0;

	if (events & (1 << IPEXP_DESTROY)) {
		type = IPCTNL_MSG_EXP_DELETE;
		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
	} else if (events & (1 << IPEXP_NEW)) {
		type = IPCTNL_MSG_EXP_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_EXP_NEW;
	} else
		return 0;

	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version	    = NFNETLINK_V0;
	nfmsg->res_id	    = 0;

	rcu_read_lock();
	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
	return 0;
}
#endif
static int ctnetlink_exp_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
	return 0;
}
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct hlist_node *n;
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
		hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
				     hnode) {
			if (l3proto && exp->tuple.src.l3num != l3proto)
				continue;
			if (cb->args[1]) {
				if (exp != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_exp_fill_info(skb,
						    NETLINK_CB(cb->skb).pid,
						    cb->nlh->nlmsg_seq,
						    IPCTNL_MSG_EXP_NEW,
						    exp) < 0) {
				if (!atomic_inc_not_zero(&exp->use))
					continue;
				cb->args[1] = (unsigned long)exp;
				goto out;
			}
		}
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING },
	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
};
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		return netlink_dump_start(ctnl, skb, nlh,
					  ctnetlink_exp_dump_table,
					  ctnetlink_exp_done, 0);
	}

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	exp = nf_ct_expect_find_get(net, zone, &tuple);
	if (!exp)
		return -ENOENT;

	if (cda[CTA_EXPECT_ID]) {
		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
		if (ntohl(id) != (u32)(unsigned long)exp) {
			nf_ct_expect_put(exp);
			return -ENOENT;
		}
	}

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL)
		goto out;

	rcu_read_lock();
	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
	rcu_read_unlock();
	if (err <= 0)
		goto free;

	nf_ct_expect_put(exp);

	return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);

free:
	kfree_skb(skb2);
out:
	nf_ct_expect_put(exp);
	return err;
}
static int
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct hlist_node *n, *next;
	u_int8_t u3 = nfmsg->nfgen_family;
	unsigned int i;
	u16 zone;
	int err;

	if (cda[CTA_EXPECT_TUPLE]) {
		/* delete a single expect by tuple */
		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
		if (err < 0)
			return err;

		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
		if (err < 0)
			return err;

		/* bump usage count to 2 */
		exp = nf_ct_expect_find_get(net, zone, &tuple);
		if (!exp)
			return -ENOENT;

		if (cda[CTA_EXPECT_ID]) {
			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
			if (ntohl(id) != (u32)(unsigned long)exp) {
				nf_ct_expect_put(exp);
				return -ENOENT;
			}
		}

		/* after list removal, usage count == 1 */
		spin_lock_bh(&nf_conntrack_lock);
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid,
						   nlmsg_report(nlh));
			nf_ct_expect_put(exp);
		}
		spin_unlock_bh(&nf_conntrack_lock);
		/* have to put what we 'get' above.
		 * after this line usage count == 0 */
		nf_ct_expect_put(exp);
	} else if (cda[CTA_EXPECT_HELP_NAME]) {
		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
		struct nf_conn_help *m_help;

		/* delete all expectations for this helper */
		spin_lock_bh(&nf_conntrack_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &net->ct.expect_hash[i],
						  hnode) {
				m_help = nfct_help(exp->master);
				if (!strcmp(m_help->helper->name, name) &&
				    del_timer(&exp->timeout)) {
					nf_ct_unlink_expect_report(exp,
							NETLINK_CB(skb).pid,
							nlmsg_report(nlh));
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);
	} else {
		/* This basically means we have to flush everything*/
		spin_lock_bh(&nf_conntrack_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &net->ct.expect_hash[i],
						  hnode) {
				if (del_timer(&exp->timeout)) {
					nf_ct_unlink_expect_report(exp,
							NETLINK_CB(skb).pid,
							nlmsg_report(nlh));
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);
	}

	return 0;
}
static int
ctnetlink_change_expect(struct nf_conntrack_expect *x,
			const struct nlattr * const cda[])
{
	return -EOPNOTSUPP;
}
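/* Expectations created without a helper on the master conntrack are marked
 * NF_CT_EXPECT_USERSPACE and must carry an explicit CTA_EXPECT_TIMEOUT. */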
static int
ctnetlink_create_expect(struct net *net, u16 zone,
			const struct nlattr * const cda[],
			u_int8_t u3,
			u32 pid, int report)
{
	struct nf_conntrack_tuple tuple, mask, master_tuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nf_conntrack_expect *exp;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	int err = 0;

	/* caller guarantees that those three CTA_EXPECT_* exist */
	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	if (err < 0)
		return err;
	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
	if (err < 0)
		return err;
	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
	if (err < 0)
		return err;

	/* Look for master conntrack of this expectation */
	h = nf_conntrack_find_get(net, zone, &master_tuple);
	if (!h)
		return -ENOENT;
	ct = nf_ct_tuplehash_to_ctrack(h);
	exp = nf_ct_expect_alloc(ct);
	if (!exp) {
		err = -ENOMEM;
		goto out;
	}
	help = nfct_help(ct);
	if (!help) {
		if (!cda[CTA_EXPECT_TIMEOUT]) {
			err = -EINVAL;
			goto out;
		}
		exp->timeout.expires =
		  jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;

		exp->flags = NF_CT_EXPECT_USERSPACE;
		if (cda[CTA_EXPECT_FLAGS]) {
			exp->flags |=
				ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
		}
	} else {
		if (cda[CTA_EXPECT_FLAGS]) {
			exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
			exp->flags &= ~NF_CT_EXPECT_USERSPACE;
		} else
			exp->flags = 0;
	}

	exp->class = 0;
	exp->expectfn = NULL;
	exp->master = ct;
	exp->helper = NULL;
	memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
	memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
	exp->mask.src.u.all = mask.src.u.all;

	err = nf_ct_expect_related_report(exp, pid, report);
	nf_ct_expect_put(exp);

out:
	nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
	return err;
}
static int
ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (!cda[CTA_EXPECT_TUPLE]
	    || !cda[CTA_EXPECT_MASK]
	    || !cda[CTA_EXPECT_MASTER])
		return -EINVAL;

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	if (err < 0)
		return err;

	spin_lock_bh(&nf_conntrack_lock);
	exp = __nf_ct_expect_find(net, zone, &tuple);

	if (!exp) {
		spin_unlock_bh(&nf_conntrack_lock);
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			err = ctnetlink_create_expect(net, zone, cda,
						      u3,
						      NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
		}
		return err;
	}

	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
		err = ctnetlink_change_expect(exp, cda);
	spin_unlock_bh(&nf_conntrack_lock);

	return err;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static struct nf_ct_event_notifier ctnl_notifier = {
	.fcn = ctnetlink_conntrack_event,
};

static struct nf_exp_event_notifier ctnl_notifier_exp = {
	.fcn = ctnetlink_expect_event,
};
#endif
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
};

static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
};
static const struct nfnetlink_subsystem ctnl_subsys = {
	.name				= "conntrack",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
	.cb_count			= IPCTNL_MSG_MAX,
	.cb				= ctnl_cb,
};

static const struct nfnetlink_subsystem ctnl_exp_subsys = {
	.name				= "conntrack_expect",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
	.cb_count			= IPCTNL_MSG_EXP_MAX,
	.cb				= ctnl_exp_cb,
};
MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
static int __init ctnetlink_init(void)
{
	int ret;

	pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
	ret = nfnetlink_subsys_register(&ctnl_subsys);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
		goto err_out;
	}

	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
		goto err_unreg_subsys;
	}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
	ret = nf_conntrack_register_notifier(&ctnl_notifier);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register notifier.\n");
		goto err_unreg_exp_subsys;
	}

	ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot expect register notifier.\n");
		goto err_unreg_notifier;
	}
#endif

	return 0;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
err_unreg_notifier:
	nf_conntrack_unregister_notifier(&ctnl_notifier);
err_unreg_exp_subsys:
	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
#endif
err_unreg_subsys:
	nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
	return ret;
}
static void __exit ctnetlink_exit(void)
{
	pr_info("ctnetlink: unregistering from nfnetlink.\n");

	nf_ct_remove_userspace_expectations();
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
	nf_conntrack_unregister_notifier(&ctnl_notifier);
#endif

	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
	nfnetlink_subsys_unregister(&ctnl_subsys);
}
module_init(ctnetlink_init);
module_exit(ctnetlink_exit);