net/ipv4/udp_diag.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * udp_diag.c	Module for monitoring UDP transport protocols sockets.
 *
 * Authors:	Pavel Emelyanov, <xemul@parallels.com>
 */

#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/udp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/sock_diag.h>
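
/*
 * Emit one netlink reply for a socket, but only if it passes the optional
 * inet_diag bytecode filter supplied by userspace.
 */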
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
                        struct netlink_callback *cb,
                        const struct inet_diag_req_v2 *req,
                        struct nlattr *bc, bool net_admin)
{
        if (!inet_diag_bc_sk(bc, sk))
                return 0;

        return inet_sk_diag_fill(sk, NULL, skb, req,
                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                        NETLINK_CB(cb->skb).portid,
                        cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
}
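
/*
 * Handle an exact-match request: look the socket up by the addresses, ports
 * and interface given in the request, verify the socket cookie, then unicast
 * a single reply back to the requester.
 */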
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                        const struct nlmsghdr *nlh,
                        const struct inet_diag_req_v2 *req)
{
        int err = -EINVAL;
        struct sock *sk = NULL;
        struct sk_buff *rep;
        struct net *net = sock_net(in_skb->sk);

        rcu_read_lock();
        if (req->sdiag_family == AF_INET)
                /* src and dst are swapped for historical reasons */
                sk = __udp4_lib_lookup(net,
                                req->id.idiag_src[0], req->id.idiag_sport,
                                req->id.idiag_dst[0], req->id.idiag_dport,
                                req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
        else if (req->sdiag_family == AF_INET6)
                sk = __udp6_lib_lookup(net,
                                (struct in6_addr *)req->id.idiag_src,
                                req->id.idiag_sport,
                                (struct in6_addr *)req->id.idiag_dst,
                                req->id.idiag_dport,
                                req->id.idiag_if, 0, tbl, NULL);
#endif
        if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        rcu_read_unlock();
        err = -ENOENT;
        if (!sk)
                goto out_nosk;

        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
        if (err)
                goto out;

        err = -ENOMEM;
        rep = nlmsg_new(sizeof(struct inet_diag_msg) +
                        sizeof(struct inet_diag_meminfo) + 64,
                        GFP_KERNEL);
        if (!rep)
                goto out;

        err = inet_sk_diag_fill(sk, NULL, rep, req,
                        sk_user_ns(NETLINK_CB(in_skb).sk),
                        NETLINK_CB(in_skb).portid,
                        nlh->nlmsg_seq, 0, nlh,
                        netlink_net_capable(in_skb, CAP_NET_ADMIN));
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(rep);
                goto out;
        }
        err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
out:
        if (sk)
                sock_put(sk);
out_nosk:
        return err;
}
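
/*
 * Walk the whole UDP hash table and dump every socket matching the requested
 * states, family and ports.  cb->args[0]/[1] record the current slot and the
 * index within it, so an interrupted dump can be resumed on the next callback.
 */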
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
                     struct netlink_callback *cb,
                     const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
        bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
        struct net *net = sock_net(skb->sk);
        int num, s_num, slot, s_slot;

        s_slot = cb->args[0];
        num = s_num = cb->args[1];

        for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
                struct udp_hslot *hslot = &table->hash[slot];
                struct sock *sk;

                num = 0;

                if (hlist_empty(&hslot->head))
                        continue;

                spin_lock_bh(&hslot->lock);
                sk_for_each(sk, &hslot->head) {
                        struct inet_sock *inet = inet_sk(sk);

                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (num < s_num)
                                goto next;
                        if (!(r->idiag_states & (1 << sk->sk_state)))
                                goto next;
                        if (r->sdiag_family != AF_UNSPEC &&
                            sk->sk_family != r->sdiag_family)
                                goto next;
                        if (r->id.idiag_sport != inet->inet_sport &&
                            r->id.idiag_sport)
                                goto next;
                        if (r->id.idiag_dport != inet->inet_dport &&
                            r->id.idiag_dport)
                                goto next;

                        if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
                                spin_unlock_bh(&hslot->lock);
                                goto done;
                        }
next:
                        num++;
                }
                spin_unlock_bh(&hslot->lock);
        }
done:
        cb->args[0] = slot;
        cb->args[1] = num;
}
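
/* inet_diag callbacks for plain UDP, plus the shared queue-info helper. */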
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                          const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
        udp_dump(&udp_table, skb, cb, r, bc);
}

static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
                             const struct inet_diag_req_v2 *req)
{
        return udp_dump_one(&udp_table, in_skb, nlh, req);
}

static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                void *info)
{
        r->idiag_rqueue = udp_rqueue_get(sk);
        r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
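
/*
 * With CONFIG_INET_DIAG_DESTROY, userspace can ask for a matching socket to
 * be forcibly closed (SOCK_DESTROY).  V4-mapped IPv6 addresses fall back to
 * the IPv4 lookup; note that the address/port ordering differs from the
 * dump_one() path above.
 */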
#ifdef CONFIG_INET_DIAG_DESTROY
static int __udp_diag_destroy(struct sk_buff *in_skb,
                              const struct inet_diag_req_v2 *req,
                              struct udp_table *tbl)
{
        struct net *net = sock_net(in_skb->sk);
        struct sock *sk;
        int err;

        rcu_read_lock();

        if (req->sdiag_family == AF_INET)
                sk = __udp4_lib_lookup(net,
                                req->id.idiag_dst[0], req->id.idiag_dport,
                                req->id.idiag_src[0], req->id.idiag_sport,
                                req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
        else if (req->sdiag_family == AF_INET6) {
                if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
                    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
                        sk = __udp4_lib_lookup(net,
                                        req->id.idiag_dst[3], req->id.idiag_dport,
                                        req->id.idiag_src[3], req->id.idiag_sport,
                                        req->id.idiag_if, 0, tbl, NULL);
                else
                        sk = __udp6_lib_lookup(net,
                                        (struct in6_addr *)req->id.idiag_dst,
                                        req->id.idiag_dport,
                                        (struct in6_addr *)req->id.idiag_src,
                                        req->id.idiag_sport,
                                        req->id.idiag_if, 0, tbl, NULL);
        }
#endif
        else {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;

        rcu_read_unlock();

        if (!sk)
                return -ENOENT;

        if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
                sock_put(sk);
                return -ENOENT;
        }

        err = sock_diag_destroy(sk, ECONNABORTED);

        sock_put(sk);

        return err;
}

static int udp_diag_destroy(struct sk_buff *in_skb,
                            const struct inet_diag_req_v2 *req)
{
        return __udp_diag_destroy(in_skb, req, &udp_table);
}

static int udplite_diag_destroy(struct sk_buff *in_skb,
                                const struct inet_diag_req_v2 *req)
{
        return __udp_diag_destroy(in_skb, req, &udplite_table);
}

#endif
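
/* Handler tables registered with inet_diag; idiag_type ties each to its protocol. */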
static const struct inet_diag_handler udp_diag_handler = {
        .dump            = udp_diag_dump,
        .dump_one        = udp_diag_dump_one,
        .idiag_get_info  = udp_diag_get_info,
        .idiag_type      = IPPROTO_UDP,
        .idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
        .destroy         = udp_diag_destroy,
#endif
};

static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                              const struct inet_diag_req_v2 *r,
                              struct nlattr *bc)
{
        udp_dump(&udplite_table, skb, cb, r, bc);
}

static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
                                 const struct inet_diag_req_v2 *req)
{
        return udp_dump_one(&udplite_table, in_skb, nlh, req);
}

static const struct inet_diag_handler udplite_diag_handler = {
        .dump            = udplite_diag_dump,
        .dump_one        = udplite_diag_dump_one,
        .idiag_get_info  = udp_diag_get_info,
        .idiag_type      = IPPROTO_UDPLITE,
        .idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
        .destroy         = udplite_diag_destroy,
#endif
};
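
/*
 * Register both handlers at module load; if registering the UDP-Lite handler
 * fails, the UDP handler is unregistered again before the error is returned.
 */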
static int __init udp_diag_init(void)
{
        int err;

        err = inet_diag_register(&udp_diag_handler);
        if (err)
                goto out;
        err = inet_diag_register(&udplite_diag_handler);
        if (err)
                goto out_lite;
out:
        return err;
out_lite:
        inet_diag_unregister(&udp_diag_handler);
        goto out;
}

static void __exit udp_diag_exit(void)
{
        inet_diag_unregister(&udplite_diag_handler);
        inet_diag_unregister(&udp_diag_handler);
}

module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);