/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	const __be32 *saddr;
	const __be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

static DEFINE_MUTEX(inet_diag_table_mutex);

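/* Look up the diag handler registered for @proto, requesting the matching
 * protocol diag module if none is loaded yet.  The table mutex is taken
 * here and released by inet_diag_unlock_handler().
 */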
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

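/* Fill the family, ports, interface, cookie and addresses of an
 * inet_diag_msg from a socket.  Only fields shared by full, timewait and
 * request sockets are touched (see twsk_build_assert() below).
 */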
static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = sk->sk_daddr;
	}
}

static size_t inet_sk_attr_size(void)
{
	return	  nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ 64;
}

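/* Build a complete INET_DIAG reply for a full socket.  @icsk may be NULL
 * for sockets that are not connection oriented; in that case the timer,
 * congestion and protocol info attributes are skipped.
 */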
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct tcp_congestion_ops *ca_ops;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if (ext & (1 << (INET_DIAG_CONG - 1))) {
		int err = 0;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops)
			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
		rcu_read_unlock();
		if (err < 0)
			goto errout;
	}

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT) {
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ext, &attr, &info);
		rcu_read_unlock();
		if (sz && nla_put(skb, attr, sz, &info) < 0)
			goto errout;
	}

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      const struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
				 user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_timer.expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans = 0;

	r->idiag_state	 = tw->tw_substate;
	r->idiag_timer	 = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue	 = 0;
	r->idiag_wqueue	 = 0;
	r->idiag_uid	 = 0;
	r->idiag_inode	 = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

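/* Fill a reply for a request (SYN_RECV) socket; only the fields that make
 * sense for a not-yet-accepted connection are reported, the rest stay 0.
 */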
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = inet_reqsk(sk)->num_retrans;

	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue	= 0;
	r->idiag_wqueue	= 0;
	r->idiag_uid	= 0;
	r->idiag_inode	= 0;

	nlmsg_end(skb, nlh);
	return 0;
}

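/* Dispatch to the fill routine matching the socket's state: timewait and
 * request sockets have reduced layouts, everything else is a full socket.
 */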
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			const struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, portid, seq,
					   nlmsg_flags, unlh);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, portid, seq,
					  nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}

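/* Look up the single socket described by @req in @hashinfo and unicast one
 * reply message back to the requesting netlink socket.
 */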
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh,
			    const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport, req->id.idiag_if);
#endif
	else
		goto out_nosk;

	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

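/* Resolve the protocol handler and let its dump_one() answer an exact
 * lookup request.
 */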
static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

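/* Interpret the INET_DIAG_REQ_BYTECODE filter against one socket entry.
 * Each op jumps forward by op->yes on match or op->no on mismatch; the
 * entry is accepted when the program is walked to its end.
 */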
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			const struct inet_diag_hostcond *cond;
			const __be32 *addr;

			cond = (const struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					     entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
			     const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry->daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry->saddr = &sk->sk_rcv_saddr;
		entry->daddr = &sk->sk_daddr;
	}
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;

	if (!bc)
		return 1;

	entry.family = sk->sk_family;
	entry_fill_addrs(&entry, sk);
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

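/* Check that a conditional jump target lands on an instruction boundary
 * inside the remaining bytecode.
 */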
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	struct inet_diag_hostcond *cond;
	int addr_len;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static bool valid_port_comparison(const struct inet_diag_bc_op *op,
				  int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}

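/* Audit a bytecode program received from user space: every op must be well
 * formed and every jump must stay within the program.
 */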
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		int min_len = sizeof(struct inet_diag_bc_op);
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

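/* Compile-time checks that the timewait sock layout mirrors the struct
 * sock/inet_sock fields read by inet_diag_msg_common_fill().
 */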
static void twsk_build_assert(void)
{
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
		     offsetof(struct sock, sk_family));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
		     offsetof(struct inet_sock, inet_num));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
		     offsetof(struct inet_sock, inet_dport));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
		     offsetof(struct inet_sock, inet_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
		     offsetof(struct inet_sock, inet_daddr));

#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
		     offsetof(struct sock, sk_v6_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
		     offsetof(struct sock, sk_v6_daddr));
#endif
}

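/* Walk a listener's SYN table and emit one message per pending request
 * socket that passes the filter; position is saved in cb->args[3..4] so
 * the dump can resume.
 */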
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       const struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;
	int j, s_j, reqnum, s_reqnum;
	struct listen_sock *lopt;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !listen_sock_qlen(lopt))
		goto out;

	if (bc) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->ir_rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				/* Note: entry.sport and entry.userlocks are already set */
				entry_fill_addrs(&entry, req_to_sk(req));
				entry.dport = ntohs(ireq->ir_rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_req_diag_fill(req_to_sk(req), skb,
						 NETLINK_CB(cb->skb).portid,
						 cb->nlh->nlmsg_seq,
						 NLM_F_MULTI, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

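/* Dump all matching sockets of a hash table: the listening hash (including
 * request sockets) first, then the established/timewait hash.  cb->args[]
 * records where to resume.
 */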
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
			 struct netlink_callback *cb,
			 const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	struct net *net = sock_net(skb->sk);
	int i, num, s_i, s_num;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct inet_listen_hashbucket *ilb;
			struct hlist_nulls_node *node;
			struct sock *sk;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct hlist_nulls_node *node;
		struct sock *sk;

		num = 0;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int state, res;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			state = (sk->sk_state == TCP_TIME_WAIT) ?
				inet_twsk(sk)->tw_substate : sk->sk_state;
			if (!(r->idiag_states & (1 << state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			twsk_build_assert();

			if (!inet_diag_bc_sk(bc, sk))
				goto next_normal;

			res = sk_diag_fill(sk, skb, r,
					   sk_user_ns(NETLINK_CB(cb->skb).sk),
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   cb->nlh);
			if (res < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *r,
			    struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct nlattr *bc = NULL;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

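/* Map the legacy TCPDIAG/DCCPDIAG message types onto IPPROTO values for
 * the compat entry points below.
 */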
static int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

static int inet_diag_dump_compat(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	int hdrlen = sizeof(struct inet_diag_req);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (!attr ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

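/* Entry point for SOCK_DIAG_BY_FAMILY requests: audit any bytecode filter,
 * then either start a dump or answer an exact query.
 */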
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (!attr ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (!inet_diag_table[type]) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);