#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
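/*
 * Report the address this socket is bound to, if any, as a
 * UNIX_DIAG_NAME attribute (the leading sun_family is omitted).
 */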
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have unix_table_lock */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}
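/*
 * For filesystem-bound sockets, report the backing inode number and
 * device as a UNIX_DIAG_VFS attribute.
 */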
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}
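/*
 * Report the inode number of the connected peer socket as a
 * UNIX_DIAG_PEER attribute; the peer's state is locked while its
 * inode number is read.
 */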
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}
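/*
 * For a listening socket, report the inode numbers of the sockets
 * sitting in its accept queue (pending connections) as a
 * UNIX_DIAG_ICONS attribute.
 */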
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
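/*
 * Report queue lengths: for listeners, the accept-queue length and
 * its backlog limit; otherwise, the bytes queued on the receive and
 * send sides.
 */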
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
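/*
 * Fill one unix_diag_msg reply for @sk, attaching whichever optional
 * attributes the request asked for; if the message overflows, the
 * partial message is cancelled and -EMSGSIZE returned.
 */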
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
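/*
 * Snapshot the socket's inode number under its state lock, then fill
 * the reply without the lock held.
 */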
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}
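/*
 * Netlink dump callback: walk every hash slot of unix_socket_table,
 * resuming from the slot and position saved in cb->args[] on each
 * invocation.
 */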
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
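/*
 * Find a unix socket by inode number, returning it with an elevated
 * refcount, or NULL if there is no match.
 */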
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}
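/*
 * Answer a single-socket query: look the socket up by inode, verify
 * its cookie, and unicast one reply, retrying with a larger buffer
 * (up to a page) if the attributes do not fit.
 */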
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;
	if (!net_eq(sock_net(sk), net))
		goto out;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
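/*
 * Entry point from the sock_diag core: start a dump for NLM_F_DUMP
 * requests, otherwise handle an exact-match query.
 */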
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};
static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}
static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}
module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);