#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/netlink.h>

#include "internal.h"

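/* Fill a PACKET_DIAG_INFO attribute with the socket's basic state:
 * interface index, ring version, reserve/copy thresholds, timestamping
 * mode and option flags.
 */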
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct packet_diag_info pinfo;

	pinfo.pdi_index = po->ifindex;
	pinfo.pdi_version = po->tp_version;
	pinfo.pdi_reserve = po->tp_reserve;
	pinfo.pdi_copy_thresh = po->copy_thresh;
	pinfo.pdi_tstamp = po->tp_tstamp;

	pinfo.pdi_flags = 0;
	if (po->running)
		pinfo.pdi_flags |= PDI_RUNNING;
	if (po->auxdata)
		pinfo.pdi_flags |= PDI_AUXDATA;
	if (po->origdev)
		pinfo.pdi_flags |= PDI_ORIGDEV;
	if (po->has_vnet_hdr)
		pinfo.pdi_flags |= PDI_VNETHDR;
	if (po->tp_loss)
		pinfo.pdi_flags |= PDI_LOSS;

	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}

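/* Dump the socket's multicast memberships as a PACKET_DIAG_MCLIST nest,
 * one packet_diag_mclist entry per membership; the list is walked under
 * rtnl_lock().
 */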
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct nlattr *mca;
	struct packet_mclist *ml;

	mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
	if (!mca)
		return -EMSGSIZE;

	rtnl_lock();
	for (ml = po->mclist; ml; ml = ml->next) {
		struct packet_diag_mclist *dml;

		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
		if (!dml) {
			rtnl_unlock();
			nla_nest_cancel(nlskb, mca);
			return -EMSGSIZE;
		}

		dml->pdmc_index = ml->ifindex;
		dml->pdmc_type = ml->type;
		dml->pdmc_alen = ml->alen;
		dml->pdmc_count = ml->count;
		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
	}

	rtnl_unlock();
	nla_nest_end(nlskb, mca);

	return 0;
}

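/* Describe one RX or TX ring as a packet_diag_ring attribute.  Nothing is
 * emitted if the ring is not set up (or for the TX ring with TPACKET_V3),
 * and the block retire/priv/feature fields are only filled for V3 rings.
 */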
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
			  struct sk_buff *nlskb)
{
	struct packet_diag_ring pdr;

	if (!ring->pg_vec || ((ver > TPACKET_V2) &&
			      (nl_type == PACKET_DIAG_TX_RING)))
		return 0;

	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
	pdr.pdr_block_nr = ring->pg_vec_len;
	pdr.pdr_frame_size = ring->frame_size;
	pdr.pdr_frame_nr = ring->frame_max + 1;

	if (ver > TPACKET_V2) {
		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
	} else {
		pdr.pdr_retire_tmo = 0;
		pdr.pdr_sizeof_priv = 0;
		pdr.pdr_features = 0;
	}

	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}

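/* Report both ring configurations under pg_vec_lock so the rings cannot
 * change while they are being dumped.
 */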
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&po->pg_vec_lock);
	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
			     PACKET_DIAG_RX_RING, skb);
	if (!ret)
		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
				     PACKET_DIAG_TX_RING, skb);
	mutex_unlock(&po->pg_vec_lock);

	return ret;
}

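/* If the socket belongs to a fanout group, report the group id in the low
 * 16 bits and the fanout type in the upper 16 bits of PACKET_DIAG_FANOUT.
 */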
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
	int ret = 0;

	mutex_lock(&fanout_mutex);
	if (po->fanout) {
		u32 val;

		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
	}
	mutex_unlock(&fanout_mutex);

	return ret;
}

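/* Build one packet_diag_msg reply for @sk and append the attribute groups
 * requested via req->pdiag_show.  On attribute overflow the partly built
 * message is cancelled and -EMSGSIZE is returned.
 */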
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			bool may_report_filterinfo,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				     PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

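/* Netlink dump callback: walk the per-netns packet socket list, emitting
 * one message per socket and resuming after the cb->args[0] sockets that
 * were already dumped on earlier calls.
 */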
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;
	bool may_report_filterinfo;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);
	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		if (sk_diag_fill(sk, skb, req,
				 may_report_filterinfo,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}

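/* Entry point called by the sock_diag core: validate the request and start
 * a netlink dump; only NLM_F_DUMP requests are supported.
 */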
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct packet_diag_req);
	struct net *net = sock_net(skb->sk);
	struct packet_diag_req *req;

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	req = nlmsg_data(h);
	/* Make it possible to support protocol filtering later */
	if (req->sdiag_protocol)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = packet_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return -EOPNOTSUPP;
}

static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};

static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}

static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}

module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);