/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
static const struct inet_diag_handler **inet_diag_table;
struct inet_diag_entry {
        const __be32 *saddr;
        const __be32 *daddr;
        u16 sport;
        u16 dport;
        u16 family;
        u16 userlocks;
        u32 ifindex;
};
static DEFINE_MUTEX(inet_diag_table_mutex);
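/*
 * A handler is looked up by IP protocol number.  If none is registered yet,
 * try to autoload one: the request_module() string below matches the
 * aliases exported by the per-protocol diag modules (tcp_diag, udp_diag,
 * dccp_diag, ...).  Note that the lookup returns with inet_diag_table_mutex
 * held, even when it returns an ERR_PTR, so every caller must pair it with
 * inet_diag_unlock_handler().
 */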
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
        if (!inet_diag_table[proto])
                request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
                               NETLINK_SOCK_DIAG, AF_INET, proto);

        mutex_lock(&inet_diag_table_mutex);
        if (!inet_diag_table[proto])
                return ERR_PTR(-ENOENT);

        return inet_diag_table[proto];
}

static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
        mutex_unlock(&inet_diag_table_mutex);
}
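/*
 * Fill the socket-identity part of an inet_diag_msg: address family,
 * source/destination ports and addresses, bound interface and the socket
 * cookie.  For IPv4 sockets only idiag_src[0]/idiag_dst[0] carry data, so
 * the remaining words are cleared first.
 */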
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
        r->idiag_family = sk->sk_family;

        r->id.idiag_sport = htons(sk->sk_num);
        r->id.idiag_dport = sk->sk_dport;
        r->id.idiag_if = sk->sk_bound_dev_if;
        sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
                *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
        } else
#endif
        {
                memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
                memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

                r->id.idiag_src[0] = sk->sk_rcv_saddr;
                r->id.idiag_dst[0] = sk->sk_daddr;
        }
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);
static size_t inet_sk_attr_size(void)
{
        return    nla_total_size(sizeof(struct tcp_info))
                + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
                + nla_total_size(1) /* INET_DIAG_TOS */
                + nla_total_size(1) /* INET_DIAG_TCLASS */
                + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
                + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
                + nla_total_size(TCP_CA_NAME_MAX)
                + nla_total_size(sizeof(struct tcpvegas_info))
                + 64;
}
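/*
 * Emit the generic per-socket attributes (shutdown state, TOS, and for
 * IPv6 sockets TCLASS plus IPV6_V6ONLY when listening or closed) and fill
 * the uid/inode fields of the message.  Returns 0 on success and non-zero
 * if the skb ran out of room, in which case the caller cancels the whole
 * message.
 */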
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
                             struct inet_diag_msg *r, int ext,
                             struct user_namespace *user_ns)
{
        const struct inet_sock *inet = inet_sk(sk);

        if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
                goto errout;

        /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
         * hence this needs to be included regardless of socket family.
         */
        if (ext & (1 << (INET_DIAG_TOS - 1)))
                if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
                        goto errout;

#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
                if (ext & (1 << (INET_DIAG_TCLASS - 1)))
                        if (nla_put_u8(skb, INET_DIAG_TCLASS,
                                       inet6_sk(sk)->tclass) < 0)
                                goto errout;

                if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
                    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
                        goto errout;
        }
#endif

        r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);

        return 0;
errout:
        return 1;
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
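/*
 * Build one complete SOCK_DIAG_BY_FAMILY reply for a full socket: the
 * fixed inet_diag_msg header plus whatever optional attributes the
 * requester asked for via idiag_ext (memory info, protocol info,
 * congestion control name and state).  On overflow the partially built
 * message is cancelled and -EMSGSIZE is returned so the dumper can retry
 * with a larger skb.
 */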
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
                      struct user_namespace *user_ns,
                      u32 portid, u32 seq, u16 nlmsg_flags,
                      const struct nlmsghdr *unlh)
{
        const struct tcp_congestion_ops *ca_ops;
        const struct inet_diag_handler *handler;
        int ext = req->idiag_ext;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        struct nlattr *attr;
        void *info = NULL;

        handler = inet_diag_table[req->sdiag_protocol];
        BUG_ON(!handler);

        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;

        r = nlmsg_data(nlh);
        BUG_ON(!sk_fullsock(sk));

        inet_diag_msg_common_fill(r, sk);
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
        r->idiag_retrans = 0;

        if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
                goto errout;

        if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
                struct inet_diag_meminfo minfo = {
                        .idiag_rmem = sk_rmem_alloc_get(sk),
                        .idiag_wmem = sk->sk_wmem_queued,
                        .idiag_fmem = sk->sk_forward_alloc,
                        .idiag_tmem = sk_wmem_alloc_get(sk),
                };

                if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
                        goto errout;
        }

        if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
                if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
                        goto errout;

        if (!icsk) {
                handler->idiag_get_info(sk, r, NULL);
                goto out;
        }

        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                r->idiag_timer = 1;
                r->idiag_retrans = icsk->icsk_retransmits;
                r->idiag_expires =
                        jiffies_to_msecs(icsk->icsk_timeout - jiffies);
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                r->idiag_timer = 4;
                r->idiag_retrans = icsk->icsk_probes_out;
                r->idiag_expires =
                        jiffies_to_msecs(icsk->icsk_timeout - jiffies);
        } else if (timer_pending(&sk->sk_timer)) {
                r->idiag_timer = 2;
                r->idiag_retrans = icsk->icsk_probes_out;
                r->idiag_expires =
                        jiffies_to_msecs(sk->sk_timer.expires - jiffies);
        } else {
                r->idiag_timer = 0;
                r->idiag_expires = 0;
        }

        if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
                attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
                                         handler->idiag_info_size,
                                         INET_DIAG_PAD);
                if (!attr)
                        goto errout;

                info = nla_data(attr);
        }

        if (ext & (1 << (INET_DIAG_CONG - 1))) {
                int err = 0;

                rcu_read_lock();
                ca_ops = READ_ONCE(icsk->icsk_ca_ops);
                if (ca_ops)
                        err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
                rcu_read_unlock();
                if (err < 0)
                        goto errout;
        }

        handler->idiag_get_info(sk, r, info);

        if (sk->sk_state < TCP_TIME_WAIT) {
                union tcp_cc_info info;
                size_t sz = 0;
                int attr;

                rcu_read_lock();
                ca_ops = READ_ONCE(icsk->icsk_ca_ops);
                if (ca_ops && ca_ops->get_info)
                        sz = ca_ops->get_info(sk, ext, &attr, &info);
                rcu_read_unlock();
                if (sz && nla_put(skb, attr, sz, &info) < 0)
                        goto errout;
        }

out:
        nlmsg_end(skb, nlh);
        return 0;

errout:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
static int inet_csk_diag_fill(struct sock *sk,
                              struct sk_buff *skb,
                              const struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,
                              u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
{
        return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
                                 user_ns, portid, seq, nlmsg_flags, unlh);
}
static int inet_twsk_diag_fill(struct sock *sk,
                               struct sk_buff *skb,
                               u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        long tmo;

        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;

        r = nlmsg_data(nlh);
        BUG_ON(tw->tw_state != TCP_TIME_WAIT);

        tmo = tw->tw_timer.expires - jiffies;
        if (tmo < 0)
                tmo = 0;

        inet_diag_msg_common_fill(r, sk);
        r->idiag_retrans = 0;

        r->idiag_state   = tw->tw_substate;
        r->idiag_timer   = 3;
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue  = 0;
        r->idiag_wqueue  = 0;
        r->idiag_uid     = 0;
        r->idiag_inode   = 0;

        nlmsg_end(skb, nlh);
        return 0;
}
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
                              u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
{
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        long tmo;

        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;

        r = nlmsg_data(nlh);
        inet_diag_msg_common_fill(r, sk);
        r->idiag_state = TCP_SYN_RECV;
        r->idiag_timer = 1;
        r->idiag_retrans = inet_reqsk(sk)->num_retrans;

        BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
                     offsetof(struct sock, sk_cookie));

        tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
        r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
        r->idiag_uid    = 0;
        r->idiag_inode  = 0;

        nlmsg_end(skb, nlh);
        return 0;
}
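/*
 * Dispatch on the socket's state: TIME_WAIT and NEW_SYN_RECV sockets are
 * not full sockets and have their own fill helpers above; everything else
 * is a full socket and goes through inet_csk_diag_fill().
 */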
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        const struct inet_diag_req_v2 *r,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
{
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill(sk, skb, portid, seq,
                                           nlmsg_flags, unlh);

        if (sk->sk_state == TCP_NEW_SYN_RECV)
                return inet_req_diag_fill(sk, skb, portid, seq,
                                          nlmsg_flags, unlh);

        return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
                                  nlmsg_flags, unlh);
}
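/*
 * Resolve an exact-match request (a single 4-tuple plus optional interface
 * and cookie) to a socket in the given hash table.  On success the socket
 * is returned with a reference held; callers release it with
 * sock_gen_put() when they are done, as inet_diag_dump_one_icsk() below
 * does.
 */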
struct sock *inet_diag_find_one_icsk(struct net *net,
                                     struct inet_hashinfo *hashinfo,
                                     const struct inet_diag_req_v2 *req)
{
        struct sock *sk;

        if (req->sdiag_family == AF_INET)
                sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
        else if (req->sdiag_family == AF_INET6) {
                if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
                    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
                        sk = inet_lookup(net, hashinfo, NULL, 0,
                                         req->id.idiag_dst[3],
                                         req->id.idiag_dport,
                                         req->id.idiag_src[3],
                                         req->id.idiag_sport,
                                         req->id.idiag_if);
                else
                        sk = inet6_lookup(net, hashinfo, NULL, 0,
                                          (struct in6_addr *)req->id.idiag_dst,
                                          req->id.idiag_dport,
                                          (struct in6_addr *)req->id.idiag_src,
                                          req->id.idiag_sport,
                                          req->id.idiag_if);
        }
#endif
        else
                return ERR_PTR(-EINVAL);

        if (!sk)
                return ERR_PTR(-ENOENT);

        if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
                sock_gen_put(sk);
                return ERR_PTR(-ENOENT);
        }

        return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
                            struct sk_buff *in_skb,
                            const struct nlmsghdr *nlh,
                            const struct inet_diag_req_v2 *req)
{
        struct net *net = sock_net(in_skb->sk);
        struct sk_buff *rep;
        struct sock *sk;
        int err;

        sk = inet_diag_find_one_icsk(net, hashinfo, req);
        if (IS_ERR(sk))
                return PTR_ERR(sk);

        rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
        }

        err = sk_diag_fill(sk, rep, req,
                           sk_user_ns(NETLINK_CB(in_skb).sk),
                           NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                nlmsg_free(rep);
                goto out;
        }
        err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;

out:
        if (sk)
                sock_gen_put(sk);

        return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
                               const struct inet_diag_req_v2 *req)
{
        const struct inet_diag_handler *handler;
        int err;

        handler = inet_diag_lock_handler(req->sdiag_protocol);
        if (IS_ERR(handler))
                err = PTR_ERR(handler);
        else if (cmd == SOCK_DIAG_BY_FAMILY)
                err = handler->dump_one(in_skb, nlh, req);
        else if (cmd == SOCK_DESTROY && handler->destroy)
                err = handler->destroy(in_skb, req);
        else
                err = -EOPNOTSUPP;
        inet_diag_unlock_handler(handler);

        return err;
}
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
        int words = bits >> 5;

        bits &= 0x1f;

        if (words) {
                if (memcmp(a1, a2, words << 2))
                        return 0;
        }
        if (bits) {
                __be32 w1, w2;
                __be32 mask;

                w1 = a1[words];
                w2 = a2[words];

                mask = htonl((0xffffffff) << (32 - bits));

                if ((w1 ^ w2) & mask)
                        return 0;
        }

        return 1;
}
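/*
 * Filter bytecode interpreter.  A program is a sequence of struct
 * inet_diag_bc_op { __u8 code; __u8 yes; __u16 no; } records: "yes" is the
 * byte offset to jump to when the condition holds, "no" when it does not.
 * Port comparisons keep their operand in the "no" field of a follow-on op
 * (op[1].no), host conditions append a struct inet_diag_hostcond, and a
 * device condition appends a raw u32 ifindex.  A socket matches when the
 * walk runs exactly off the end of the program (len reaches 0); jumping
 * past the end via a "no" branch rejects it.
 */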
static int inet_diag_bc_run(const struct nlattr *_bc,
                            const struct inet_diag_entry *entry)
{
        const void *bc = nla_data(_bc);
        int len = nla_len(_bc);

        while (len > 0) {
                int yes = 1;
                const struct inet_diag_bc_op *op = bc;

                switch (op->code) {
                case INET_DIAG_BC_NOP:
                        break;
                case INET_DIAG_BC_JMP:
                        yes = 0;
                        break;
                case INET_DIAG_BC_S_GE:
                        yes = entry->sport >= op[1].no;
                        break;
                case INET_DIAG_BC_S_LE:
                        yes = entry->sport <= op[1].no;
                        break;
                case INET_DIAG_BC_D_GE:
                        yes = entry->dport >= op[1].no;
                        break;
                case INET_DIAG_BC_D_LE:
                        yes = entry->dport <= op[1].no;
                        break;
                case INET_DIAG_BC_AUTO:
                        yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
                        break;
                case INET_DIAG_BC_S_COND:
                case INET_DIAG_BC_D_COND: {
                        const struct inet_diag_hostcond *cond;
                        const __be32 *addr;

                        cond = (const struct inet_diag_hostcond *)(op + 1);
                        if (cond->port != -1 &&
                            cond->port != (op->code == INET_DIAG_BC_S_COND ?
                                             entry->sport : entry->dport)) {
                                yes = 0;
                                break;
                        }

                        if (op->code == INET_DIAG_BC_S_COND)
                                addr = entry->saddr;
                        else
                                addr = entry->daddr;

                        if (cond->family != AF_UNSPEC &&
                            cond->family != entry->family) {
                                if (entry->family == AF_INET6 &&
                                    cond->family == AF_INET) {
                                        if (addr[0] == 0 && addr[1] == 0 &&
                                            addr[2] == htonl(0xffff) &&
                                            bitstring_match(addr + 3,
                                                            cond->addr,
                                                            cond->prefix_len))
                                                break;
                                }
                                yes = 0;
                                break;
                        }

                        if (cond->prefix_len == 0)
                                break;
                        if (bitstring_match(addr, cond->addr,
                                            cond->prefix_len))
                                break;
                        yes = 0;
                        break;
                }
                case INET_DIAG_BC_DEV_COND: {
                        u32 ifindex;

                        ifindex = *((const u32 *)(op + 1));
                        if (ifindex != entry->ifindex)
                                yes = 0;
                        break;
                }
                }

                if (yes) {
                        len -= op->yes;
                        bc += op->yes;
                } else {
                        len -= op->no;
                        bc += op->no;
                }
        }
        return len == 0;
}
/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
                             const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
                entry->daddr = sk->sk_v6_daddr.s6_addr32;
        } else
#endif
        {
                entry->saddr = &sk->sk_rcv_saddr;
                entry->daddr = &sk->sk_daddr;
        }
}
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_diag_entry entry;

        if (!bc)
                return 1;

        entry.family = sk->sk_family;
        entry_fill_addrs(&entry, sk);
        entry.sport = inet->inet_num;
        entry.dport = ntohs(inet->inet_dport);
        entry.ifindex = sk->sk_bound_dev_if;
        entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;

        return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
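/*
 * The helpers below sanity-check a user supplied filter before it is run:
 * every operation must be large enough for its trailing operand
 * (valid_hostcond/valid_devcond/valid_port_comparison grow min_len
 * accordingly), jump offsets must be 4-byte aligned and stay within the
 * program, and valid_cc() verifies that a "no" branch lands on an
 * operation boundary reachable from the start.
 */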
static int valid_cc(const void *bc, int len, int cc)
{
        while (len >= 0) {
                const struct inet_diag_bc_op *op = bc;

                if (cc > len)
                        return 0;
                if (cc == len)
                        return 1;
                if (op->yes < 4 || op->yes & 3)
                        return 0;
                len -= op->yes;
                bc  += op->yes;
        }
        return 0;
}
/* data is u32 ifindex */
static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
                          int *min_len)
{
        /* Check ifindex space. */
        *min_len += sizeof(u32);
        if (len < *min_len)
                return false;

        return true;
}
/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
                           int *min_len)
{
        struct inet_diag_hostcond *cond;
        int addr_len;

        /* Check hostcond space. */
        *min_len += sizeof(struct inet_diag_hostcond);
        if (len < *min_len)
                return false;
        cond = (struct inet_diag_hostcond *)(op + 1);

        /* Check address family and address length. */
        switch (cond->family) {
        case AF_UNSPEC:
                addr_len = 0;
                break;
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
        default:
                return false;
        }
        *min_len += addr_len;
        if (len < *min_len)
                return false;

        /* Check prefix length (in bits) vs address length (in bytes). */
        if (cond->prefix_len > 8 * addr_len)
                return false;

        return true;
}
/* Validate a port comparison operator. */
static bool valid_port_comparison(const struct inet_diag_bc_op *op,
                                  int len, int *min_len)
{
        /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
        *min_len += sizeof(struct inet_diag_bc_op);
        if (len < *min_len)
                return false;
        return true;
}
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
        const void *bc = bytecode;
        int len = bytecode_len;

        while (len > 0) {
                int min_len = sizeof(struct inet_diag_bc_op);
                const struct inet_diag_bc_op *op = bc;

                switch (op->code) {
                case INET_DIAG_BC_S_COND:
                case INET_DIAG_BC_D_COND:
                        if (!valid_hostcond(bc, len, &min_len))
                                return -EINVAL;
                        break;
                case INET_DIAG_BC_DEV_COND:
                        if (!valid_devcond(bc, len, &min_len))
                                return -EINVAL;
                        break;
                case INET_DIAG_BC_S_GE:
                case INET_DIAG_BC_S_LE:
                case INET_DIAG_BC_D_GE:
                case INET_DIAG_BC_D_LE:
                        if (!valid_port_comparison(bc, len, &min_len))
                                return -EINVAL;
                        break;
                case INET_DIAG_BC_AUTO:
                case INET_DIAG_BC_JMP:
                case INET_DIAG_BC_NOP:
                        break;
                default:
                        return -EINVAL;
                }

                if (op->code != INET_DIAG_BC_NOP) {
                        if (op->no < min_len || op->no > len + 4 || op->no & 3)
                                return -EINVAL;
                        if (op->no < len &&
                            !valid_cc(bytecode, bytecode_len, len - op->no))
                                return -EINVAL;
                }

                if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
                        return -EINVAL;
                bc  += op->yes;
                len -= op->yes;
        }
        return len == 0 ? 0 : -EINVAL;
}
static int inet_csk_diag_dump(struct sock *sk,
                              struct sk_buff *skb,
                              struct netlink_callback *cb,
                              const struct inet_diag_req_v2 *r,
                              const struct nlattr *bc)
{
        if (!inet_diag_bc_sk(bc, sk))
                return 0;

        return inet_csk_diag_fill(sk, skb, r,
                                  sk_user_ns(NETLINK_CB(cb->skb).sk),
                                  NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static void twsk_build_assert(void)
{
        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
                     offsetof(struct sock, sk_family));

        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
                     offsetof(struct inet_sock, inet_num));

        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
                     offsetof(struct inet_sock, inet_dport));

        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
                     offsetof(struct inet_sock, inet_rcv_saddr));

        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
                     offsetof(struct inet_sock, inet_daddr));

#if IS_ENABLED(CONFIG_IPV6)
        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
                     offsetof(struct sock, sk_v6_rcv_saddr));

        BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
                     offsetof(struct sock, sk_v6_daddr));
#endif
}
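/*
 * Netlink dump worker shared by the protocol diag modules.  It walks the
 * listening hash (when TCPF_LISTEN was requested) and then the established
 * hash, applying the family/port filters and the optional bytecode filter
 * to every socket.  cb->args[] records the bucket and in-bucket position
 * so that an interrupted dump can resume where it left off.
 */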
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                         struct netlink_callback *cb,
                         const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
        struct net *net = sock_net(skb->sk);
        int i, num, s_i, s_num;
        u32 idiag_states = r->idiag_states;

        if (idiag_states & TCPF_SYN_RECV)
                idiag_states |= TCPF_NEW_SYN_RECV;
        s_i = cb->args[1];
        s_num = num = cb->args[2];

        if (cb->args[0] == 0) {
                if (!(idiag_states & TCPF_LISTEN))
                        goto skip_listen_ht;

                for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
                        struct inet_listen_hashbucket *ilb;
                        struct sock *sk;

                        num = 0;
                        ilb = &hashinfo->listening_hash[i];
                        spin_lock_bh(&ilb->lock);
                        sk_for_each(sk, &ilb->head) {
                                struct inet_sock *inet = inet_sk(sk);

                                if (!net_eq(sock_net(sk), net))
                                        continue;

                                if (num < s_num) {
                                        num++;
                                        continue;
                                }

                                if (r->sdiag_family != AF_UNSPEC &&
                                    sk->sk_family != r->sdiag_family)
                                        goto next_listen;

                                if (r->id.idiag_sport != inet->inet_sport &&
                                    r->id.idiag_sport)
                                        goto next_listen;

                                if (r->id.idiag_dport ||
                                    cb->args[3] > 0)
                                        goto next_listen;

                                if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }

next_listen:
                                cb->args[3] = 0;
                                cb->args[4] = 0;
                                ++num;
                        }
                        spin_unlock_bh(&ilb->lock);

                        s_num = 0;
                        cb->args[3] = 0;
                        cb->args[4] = 0;
                }
skip_listen_ht:
                cb->args[0] = 1;
                s_i = num = s_num = 0;
        }

        if (!(idiag_states & ~TCPF_LISTEN))
                goto out;

        for (i = s_i; i <= hashinfo->ehash_mask; i++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[i];
                spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
                struct hlist_nulls_node *node;
                struct sock *sk;

                num = 0;

                if (hlist_nulls_empty(&head->chain))
                        continue;

                if (i > s_i)
                        s_num = 0;

                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &head->chain) {
                        int state, res;

                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (num < s_num)
                                goto next_normal;
                        state = (sk->sk_state == TCP_TIME_WAIT) ?
                                inet_twsk(sk)->tw_substate : sk->sk_state;
                        if (!(idiag_states & (1 << state)))
                                goto next_normal;
                        if (r->sdiag_family != AF_UNSPEC &&
                            sk->sk_family != r->sdiag_family)
                                goto next_normal;
                        if (r->id.idiag_sport != htons(sk->sk_num) &&
                            r->id.idiag_sport)
                                goto next_normal;
                        if (r->id.idiag_dport != sk->sk_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
                        twsk_build_assert();

                        if (!inet_diag_bc_sk(bc, sk))
                                goto next_normal;

                        res = sk_diag_fill(sk, skb, r,
                                           sk_user_ns(NETLINK_CB(cb->skb).sk),
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           cb->nlh);
                        if (res < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
next_normal:
                        ++num;
                }

                spin_unlock_bh(lock);
        }

done:
        cb->args[1] = i;
        cb->args[2] = num;
out:
        ;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            const struct inet_diag_req_v2 *r,
                            struct nlattr *bc)
{
        const struct inet_diag_handler *handler;
        int err = 0;

        handler = inet_diag_lock_handler(r->sdiag_protocol);
        if (!IS_ERR(handler))
                handler->dump(skb, cb, r, bc);
        else
                err = PTR_ERR(handler);
        inet_diag_unlock_handler(handler);

        return err ? : skb->len;
}
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int hdrlen = sizeof(struct inet_diag_req_v2);
        struct nlattr *bc = NULL;

        if (nlmsg_attrlen(cb->nlh, hdrlen))
                bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

        return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}
static int inet_diag_type2proto(int type)
{
        switch (type) {
        case TCPDIAG_GETSOCK:
                return IPPROTO_TCP;
        case DCCPDIAG_GETSOCK:
                return IPPROTO_DCCP;
        default:
                return 0;
        }
}
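/*
 * Legacy TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK interface: translate the old
 * struct inet_diag_req into a struct inet_diag_req_v2 and reuse the
 * regular dump and exact-lookup paths.
 */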
static int inet_diag_dump_compat(struct sk_buff *skb,
                                 struct netlink_callback *cb)
{
        struct inet_diag_req *rc = nlmsg_data(cb->nlh);
        int hdrlen = sizeof(struct inet_diag_req);
        struct inet_diag_req_v2 req;
        struct nlattr *bc = NULL;

        req.sdiag_family = AF_UNSPEC; /* compatibility */
        req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
        req.idiag_ext = rc->idiag_ext;
        req.idiag_states = rc->idiag_states;
        req.id = rc->id;

        if (nlmsg_attrlen(cb->nlh, hdrlen))
                bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

        return __inet_diag_dump(skb, cb, &req, bc);
}
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
                                      const struct nlmsghdr *nlh)
{
        struct inet_diag_req *rc = nlmsg_data(nlh);
        struct inet_diag_req_v2 req;

        req.sdiag_family = rc->idiag_family;
        req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
        req.idiag_ext = rc->idiag_ext;
        req.idiag_states = rc->idiag_states;
        req.id = rc->id;

        return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
}
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int hdrlen = sizeof(struct inet_diag_req);
        struct net *net = sock_net(skb->sk);

        if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
            nlmsg_len(nlh) < hdrlen)
                return -EINVAL;

        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                if (nlmsg_attrlen(nlh, hdrlen)) {
                        struct nlattr *attr;

                        attr = nlmsg_find_attr(nlh, hdrlen,
                                               INET_DIAG_REQ_BYTECODE);
                        if (!attr ||
                            nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
                            inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
                                return -EINVAL;
                }
                {
                        struct netlink_dump_control c = {
                                .dump = inet_diag_dump_compat,
                        };
                        return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
                }
        }

        return inet_diag_get_exact_compat(skb, nlh);
}
static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
{
        int hdrlen = sizeof(struct inet_diag_req_v2);
        struct net *net = sock_net(skb->sk);

        if (nlmsg_len(h) < hdrlen)
                return -EINVAL;

        if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
            h->nlmsg_flags & NLM_F_DUMP) {
                if (nlmsg_attrlen(h, hdrlen)) {
                        struct nlattr *attr;

                        attr = nlmsg_find_attr(h, hdrlen,
                                               INET_DIAG_REQ_BYTECODE);
                        if (!attr ||
                            nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
                            inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
                                return -EINVAL;
                }
                {
                        struct netlink_dump_control c = {
                                .dump = inet_diag_dump,
                        };
                        return netlink_dump_start(net->diag_nlsk, skb, h, &c);
                }
        }

        return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
}
*skb
, struct sock
*sk
)
1076 const struct inet_diag_handler
*handler
;
1077 struct nlmsghdr
*nlh
;
1078 struct nlattr
*attr
;
1079 struct inet_diag_msg
*r
;
1083 nlh
= nlmsg_put(skb
, 0, 0, SOCK_DIAG_BY_FAMILY
, sizeof(*r
), 0);
1087 r
= nlmsg_data(nlh
);
1088 memset(r
, 0, sizeof(*r
));
1089 inet_diag_msg_common_fill(r
, sk
);
1090 if (sk
->sk_type
== SOCK_DGRAM
|| sk
->sk_type
== SOCK_STREAM
)
1091 r
->id
.idiag_sport
= inet_sk(sk
)->inet_sport
;
1092 r
->idiag_state
= sk
->sk_state
;
1094 if ((err
= nla_put_u8(skb
, INET_DIAG_PROTOCOL
, sk
->sk_protocol
))) {
1095 nlmsg_cancel(skb
, nlh
);
1099 handler
= inet_diag_lock_handler(sk
->sk_protocol
);
1100 if (IS_ERR(handler
)) {
1101 inet_diag_unlock_handler(handler
);
1102 nlmsg_cancel(skb
, nlh
);
1103 return PTR_ERR(handler
);
1106 attr
= handler
->idiag_info_size
1107 ? nla_reserve_64bit(skb
, INET_DIAG_INFO
,
1108 handler
->idiag_info_size
,
1112 info
= nla_data(attr
);
1114 handler
->idiag_get_info(sk
, r
, info
);
1115 inet_diag_unlock_handler(handler
);
1117 nlmsg_end(skb
, nlh
);
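/*
 * sock_diag dispatch tables.  AF_INET and AF_INET6 requests share the same
 * command handlers; the per-protocol work is delegated through
 * inet_diag_table[] keyed by sdiag_protocol.
 */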
static const struct sock_diag_handler inet_diag_handler = {
        .family = AF_INET,
        .dump = inet_diag_handler_cmd,
        .get_info = inet_diag_handler_get_info,
        .destroy = inet_diag_handler_cmd,
};

static const struct sock_diag_handler inet6_diag_handler = {
        .family = AF_INET6,
        .dump = inet_diag_handler_cmd,
        .get_info = inet_diag_handler_get_info,
        .destroy = inet_diag_handler_cmd,
};
int inet_diag_register(const struct inet_diag_handler *h)
{
        const __u16 type = h->idiag_type;
        int err = -EINVAL;

        if (type >= IPPROTO_MAX)
                goto out;

        mutex_lock(&inet_diag_table_mutex);
        err = -EEXIST;
        if (!inet_diag_table[type]) {
                inet_diag_table[type] = h;
                err = 0;
        }
        mutex_unlock(&inet_diag_table_mutex);
out:
        return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
void inet_diag_unregister(const struct inet_diag_handler *h)
{
        const __u16 type = h->idiag_type;

        if (type >= IPPROTO_MAX)
                return;

        mutex_lock(&inet_diag_table_mutex);
        inet_diag_table[type] = NULL;
        mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
static int __init inet_diag_init(void)
{
        const int inet_diag_table_size = (IPPROTO_MAX *
                                          sizeof(struct inet_diag_handler *));
        int err = -ENOMEM;

        inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
        if (!inet_diag_table)
                goto out;

        err = sock_diag_register(&inet_diag_handler);
        if (err)
                goto out_free_nl;

        err = sock_diag_register(&inet6_diag_handler);
        if (err)
                goto out_free_inet;

        sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
        return err;

out_free_inet:
        sock_diag_unregister(&inet_diag_handler);
out_free_nl:
        kfree(inet_diag_table);
        goto out;
}
static void __exit inet_diag_exit(void)
{
        sock_diag_unregister(&inet6_diag_handler);
        sock_diag_unregister(&inet_diag_handler);
        sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
        kfree(inet_diag_table);
}
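/*
 * The aliases below let sock_diag autoload this module when the first
 * SOCK_DIAG_BY_FAMILY request for AF_INET (2) or AF_INET6 (10) arrives,
 * mirroring the request_module() call used above to pull in the
 * per-protocol diag handlers.
 */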
module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG
, 10 /* AF_INET6 */);