#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

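/*
 * unix_diag: sock_diag (NETLINK_SOCK_DIAG) support for AF_UNIX sockets.
 * The sk_diag_dump_*() and sk_diag_show_*() helpers below each fill in
 * one optional UNIX_DIAG_* attribute of a reply, selected by the
 * UDIAG_SHOW_* bits of the request.
 */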
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}

static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = dentry->d_inode->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

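/*
 * For a listening socket, report the inode numbers of the peers of the
 * not-yet-accepted connections sitting in its receive queue
 * (UNIX_DIAG_ICONS).
 */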
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

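/*
 * Queue lengths: for a listener, the pending-connection count and the
 * accept backlog limit; otherwise the byte counts returned by
 * unix_inq_len()/unix_outq_len().
 */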
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

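/*
 * Fill one unix_diag_msg reply for @sk, appending the optional
 * attributes requested via req->udiag_show.
 */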
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

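/*
 * Read the inode number under the socket's state lock; sockets that
 * report a zero inode are skipped.
 */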
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}

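/*
 * Netlink dump callback: walk unix_socket_table under unix_table_lock,
 * using cb->args[0]/[1] (slot and position) so the dump can resume
 * where a previous pass left off.
 */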
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

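/*
 * Linear search of the hash table for a socket with the given inode
 * number; returns it with a reference held, or NULL.
 */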
static struct sock *unix_lookup_by_ino(int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

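/*
 * Single-socket query: look the socket up by inode, verify the cookie,
 * and unicast one reply, retrying with a larger skb (up to PAGE_SIZE of
 * extra room) if the attributes do not fit.
 */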
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

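/* Entry point from sock_diag: dump everything or answer an exact query. */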
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);