1 #include <linux/types.h>
2 #include <linux/spinlock.h>
3 #include <linux/sock_diag.h>
4 #include <linux/unix_diag.h>
5 #include <linux/skbuff.h>
6 #include <linux/module.h>
7 #include <net/netlink.h>
8 #include <net/af_unix.h>
9 #include <net/tcp_states.h>
/*
 * Legacy rtnetlink attribute helper: reserve an attribute of @attrtype /
 * @attrlen in @skb and return a pointer to its payload area.
 *
 * NOTE: __RTA_PUT() does "goto rtattr_failure" when the skb has no room,
 * so every user of this macro must provide an rtattr_failure label.
 */
#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
14 static int sk_diag_dump_name(struct sock
*sk
, struct sk_buff
*nlskb
)
16 struct unix_address
*addr
= unix_sk(sk
)->addr
;
20 s
= UNIX_DIAG_PUT(nlskb
, UNIX_DIAG_NAME
, addr
->len
- sizeof(short));
21 memcpy(s
, addr
->name
->sun_path
, addr
->len
- sizeof(short));
30 static int sk_diag_dump_vfs(struct sock
*sk
, struct sk_buff
*nlskb
)
32 struct dentry
*dentry
= unix_sk(sk
)->dentry
;
33 struct unix_diag_vfs
*uv
;
36 uv
= UNIX_DIAG_PUT(nlskb
, UNIX_DIAG_VFS
, sizeof(*uv
));
37 uv
->udiag_vfs_ino
= dentry
->d_inode
->i_ino
;
38 uv
->udiag_vfs_dev
= dentry
->d_sb
->s_dev
;
47 static int sk_diag_dump_peer(struct sock
*sk
, struct sk_buff
*nlskb
)
52 peer
= unix_peer_get(sk
);
54 unix_state_lock(peer
);
55 ino
= sock_i_ino(peer
);
56 unix_state_unlock(peer
);
59 RTA_PUT_U32(nlskb
, UNIX_DIAG_PEER
, ino
);
67 static int sk_diag_dump_icons(struct sock
*sk
, struct sk_buff
*nlskb
)
73 if (sk
->sk_state
== TCP_LISTEN
) {
74 spin_lock(&sk
->sk_receive_queue
.lock
);
75 buf
= UNIX_DIAG_PUT(nlskb
, UNIX_DIAG_ICONS
,
76 sk
->sk_receive_queue
.qlen
* sizeof(u32
));
78 skb_queue_walk(&sk
->sk_receive_queue
, skb
) {
79 struct sock
*req
, *peer
;
83 * The state lock is outer for the same sk's
84 * queue lock. With the other's queue locked it's
85 * OK to lock the state.
87 unix_state_lock_nested(req
);
88 peer
= unix_sk(req
)->peer
;
89 buf
[i
++] = (peer
? sock_i_ino(peer
) : 0);
90 unix_state_unlock(req
);
92 spin_unlock(&sk
->sk_receive_queue
.lock
);
98 spin_unlock(&sk
->sk_receive_queue
.lock
);
102 static int sk_diag_show_rqlen(struct sock
*sk
, struct sk_buff
*nlskb
)
104 struct unix_diag_rqlen
*rql
;
106 rql
= UNIX_DIAG_PUT(nlskb
, UNIX_DIAG_RQLEN
, sizeof(*rql
));
108 if (sk
->sk_state
== TCP_LISTEN
) {
109 rql
->udiag_rqueue
= sk
->sk_receive_queue
.qlen
;
110 rql
->udiag_wqueue
= sk
->sk_max_ack_backlog
;
112 rql
->udiag_rqueue
= (__u32
)unix_inq_len(sk
);
113 rql
->udiag_wqueue
= (__u32
)unix_outq_len(sk
);
122 static int sk_diag_fill(struct sock
*sk
, struct sk_buff
*skb
, struct unix_diag_req
*req
,
123 u32 pid
, u32 seq
, u32 flags
, int sk_ino
)
125 unsigned char *b
= skb_tail_pointer(skb
);
126 struct nlmsghdr
*nlh
;
127 struct unix_diag_msg
*rep
;
129 nlh
= NLMSG_PUT(skb
, pid
, seq
, SOCK_DIAG_BY_FAMILY
, sizeof(*rep
));
130 nlh
->nlmsg_flags
= flags
;
132 rep
= NLMSG_DATA(nlh
);
134 rep
->udiag_family
= AF_UNIX
;
135 rep
->udiag_type
= sk
->sk_type
;
136 rep
->udiag_state
= sk
->sk_state
;
137 rep
->udiag_ino
= sk_ino
;
138 sock_diag_save_cookie(sk
, rep
->udiag_cookie
);
140 if ((req
->udiag_show
& UDIAG_SHOW_NAME
) &&
141 sk_diag_dump_name(sk
, skb
))
144 if ((req
->udiag_show
& UDIAG_SHOW_VFS
) &&
145 sk_diag_dump_vfs(sk
, skb
))
148 if ((req
->udiag_show
& UDIAG_SHOW_PEER
) &&
149 sk_diag_dump_peer(sk
, skb
))
152 if ((req
->udiag_show
& UDIAG_SHOW_ICONS
) &&
153 sk_diag_dump_icons(sk
, skb
))
156 if ((req
->udiag_show
& UDIAG_SHOW_RQLEN
) &&
157 sk_diag_show_rqlen(sk
, skb
))
160 if ((req
->udiag_show
& UDIAG_SHOW_MEMINFO
) &&
161 sock_diag_put_meminfo(sk
, skb
, UNIX_DIAG_MEMINFO
))
164 nlh
->nlmsg_len
= skb_tail_pointer(skb
) - b
;
172 static int sk_diag_dump(struct sock
*sk
, struct sk_buff
*skb
, struct unix_diag_req
*req
,
173 u32 pid
, u32 seq
, u32 flags
)
178 sk_ino
= sock_i_ino(sk
);
179 unix_state_unlock(sk
);
184 return sk_diag_fill(sk
, skb
, req
, pid
, seq
, flags
, sk_ino
);
187 static int unix_diag_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
189 struct unix_diag_req
*req
;
190 int num
, s_num
, slot
, s_slot
;
192 req
= NLMSG_DATA(cb
->nlh
);
194 s_slot
= cb
->args
[0];
195 num
= s_num
= cb
->args
[1];
197 spin_lock(&unix_table_lock
);
198 for (slot
= s_slot
; slot
<= UNIX_HASH_SIZE
; s_num
= 0, slot
++) {
200 struct hlist_node
*node
;
203 sk_for_each(sk
, node
, &unix_socket_table
[slot
]) {
206 if (!(req
->udiag_states
& (1 << sk
->sk_state
)))
208 if (sk_diag_dump(sk
, skb
, req
,
209 NETLINK_CB(cb
->skb
).pid
,
218 spin_unlock(&unix_table_lock
);
225 static struct sock
*unix_lookup_by_ino(int ino
)
230 spin_lock(&unix_table_lock
);
231 for (i
= 0; i
<= UNIX_HASH_SIZE
; i
++) {
232 struct hlist_node
*node
;
234 sk_for_each(sk
, node
, &unix_socket_table
[i
])
235 if (ino
== sock_i_ino(sk
)) {
237 spin_unlock(&unix_table_lock
);
243 spin_unlock(&unix_table_lock
);
247 static int unix_diag_get_exact(struct sk_buff
*in_skb
,
248 const struct nlmsghdr
*nlh
,
249 struct unix_diag_req
*req
)
254 unsigned int extra_len
;
256 if (req
->udiag_ino
== 0)
259 sk
= unix_lookup_by_ino(req
->udiag_ino
);
264 err
= sock_diag_check_cookie(sk
, req
->udiag_cookie
);
271 rep
= alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg
) + extra_len
)),
276 err
= sk_diag_fill(sk
, rep
, req
, NETLINK_CB(in_skb
).pid
,
277 nlh
->nlmsg_seq
, 0, req
->udiag_ino
);
281 if (extra_len
>= PAGE_SIZE
)
286 err
= netlink_unicast(sock_diag_nlsk
, rep
, NETLINK_CB(in_skb
).pid
,
297 static int unix_diag_handler_dump(struct sk_buff
*skb
, struct nlmsghdr
*h
)
299 int hdrlen
= sizeof(struct unix_diag_req
);
301 if (nlmsg_len(h
) < hdrlen
)
304 if (h
->nlmsg_flags
& NLM_F_DUMP
)
305 return netlink_dump_start(sock_diag_nlsk
, skb
, h
,
306 unix_diag_dump
, NULL
, 0);
308 return unix_diag_get_exact(skb
, h
, (struct unix_diag_req
*)NLMSG_DATA(h
));
311 static struct sock_diag_handler unix_diag_handler
= {
313 .dump
= unix_diag_handler_dump
,
316 static int __init
unix_diag_init(void)
318 return sock_diag_register(&unix_diag_handler
);
321 static void __exit
unix_diag_exit(void)
323 sock_diag_unregister(&unix_diag_handler
);
326 module_init(unix_diag_init
);
327 module_exit(unix_diag_exit
);
328 MODULE_LICENSE("GPL");
329 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK
, NETLINK_SOCK_DIAG
, 1 /* AF_LOCAL */);