#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

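/* Report the address this socket is bound to, if it has one. */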
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;

	if (!addr)
		return 0;

	/* addr->len includes the leading sun_family word; strip it. */
	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}

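/* Report the inode and device of the filesystem object the socket is bound to. */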
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

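/* Report the inode number of the connected peer, read under the peer's state lock. */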
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

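/*
 * For a listening socket, report the inodes of the peers of the
 * not-yet-accepted connections sitting in its receive queue.
 */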
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

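/*
 * Queue lengths: pending connections vs. backlog limit for listeners,
 * byte counts of the incoming and outgoing queues otherwise.
 */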
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

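/*
 * Build one reply message: the unix_diag_msg header plus whichever
 * UNIX_DIAG_* attributes the request's udiag_show mask asks for.
 */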
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

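/* Snapshot the inode under the socket's state lock, then fill the reply without holding it. */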
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}

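/*
 * Dump callback: walk unix_socket_table under unix_table_lock, filtering
 * by network namespace and by the udiag_states mask; cb->args[0]/[1] hold
 * the slot and in-slot index to resume from on the next pass.
 */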
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

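/* Linear search of the whole table by inode number; returns a held socket or NULL. */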
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

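/*
 * Answer a non-dump request for a single socket: look it up by inode,
 * check the cookie, and retry with a larger reply skb (up to a page)
 * if the requested attributes do not fit.
 */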
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;
	if (!net_eq(sock_net(sk), net))
		goto out;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

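/*
 * Entry point from sock_diag: validate the header length, start a netlink
 * dump for NLM_F_DUMP requests, otherwise answer for exactly one socket.
 */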
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);