// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT nuke it (felt this was needed).
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen :	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg :	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi :	HW checksumming for outgoing UDP
 *		Hirokazu Takahashi :	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
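
/* AF_INET front end for udp_lib_get_port(): precompute the secondary
 * (address, port) hashes used by the port-conflict checks above before
 * taking the port.
 */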
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
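
/* Score a candidate socket against a received 4-tuple: exact matches on the
 * bound address, connected peer address/port and bound device increase the
 * score, any mismatch disqualifies the socket (returns -1).
 */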
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score += 4;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;
	return score;
}
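
/* Flow hash used for SO_REUSEPORT socket selection; the secret is seeded
 * once and mixed with the per-netns hash.
 */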
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}
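
/* If @sk belongs to a reuseport group and is not connected, return the
 * group member chosen by the reuseport hash (or attached BPF program),
 * otherwise NULL.
 */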
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}
static struct sock *udp4_lookup_run_bpf(struct net *net,
					struct udp_table *udptable,
					struct sk_buff *skb,
					__be32 saddr, __be16 sport,
					__be32 daddr, u16 hnum)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP,
					    saddr, sport, daddr, hnum, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard socket */
	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp4_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  htonl(INADDR_ANY), hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), &udp_table, NULL);
}
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
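
/* Check whether @sk is willing to accept a multicast/broadcast datagram with
 * the given local/remote addresses, ports and incoming device.
 */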
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);
/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}
/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb, u32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);
	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
						  info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, &udp_table);
}
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
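
/* Fill in the UDP header of a cooked skb, pick the checksum strategy
 * (UDP-Lite, disabled, hardware offload or software) and hand the packet
 * to the IP layer, updating SNMP counters on the way out.
 */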
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {		 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
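
/* Parse SOL_UDP control messages supplied with sendmsg(); currently only the
 * UDP_SEGMENT GSO size is understood.
 */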
static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);
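
/* sendmsg() handler for UDP and UDP-Lite: resolve the destination address and
 * route, then either build and send a single datagram (lockless fast path) or
 * append to the corked queue when MSG_MORE/UDP_CORK is in effect.
 */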
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = up->gso_size;

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("cork failed\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
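
/* Flag folded into the scratch truesize field: set when the skb carries no
 * head state (dst, conntrack, secpath) that would need releasing at consume
 * time.
 */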
#define UDP_SKB_IS_STATELESS 0x80000000

/* all head states (dst, sk, nf conntrack) except skb extensions are
 * cleared by udp_rcv().
 *
 * We need to preserve secpath, if present, to eventually process
 * IP_CMSG_PASSSEC at recvmsg() time.
 *
 * Other extensions can be cleared.
 */
static bool udp_try_make_stateless(struct sk_buff *skb)
{
	if (!skb_has_extensions(skb))
		return true;

	if (!secpath_exists(skb)) {
		skb_ext_reset(skb);
		return true;
	}

	return false;
}

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	if (udp_try_make_stateless(skb))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
{
	/* We come here after udp_lib_checksum_complete() returned 0.
	 * This means that __skb_checksum_complete() might have
	 * set skb->csum_valid to 1.
	 * On 64bit platforms, we can set csum_unnecessary
	 * to true, but only if the skb is not shared.
	 */
#if BITS_PER_LONG == 64
	if (!skb_shared(skb))
		udp_skb_scratch(skb)->csum_unnecessary = true;
#endif
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}
/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}
/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}
/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylock can be allocated on a per cpu manner, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}
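
/* Charge the skb to the socket receive queue, growing forward-allocated
 * memory under the queue lock when needed, and wake up the reader. Drops
 * and accounts the packet when the receive buffer is exhausted.
 */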
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);
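
/* Release a received skb after @len bytes were copied to user space,
 * rewinding the peek offset if one is in use.
 */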
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
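
/* Walk @rcvq dropping packets that fail checksum validation; return the
 * first valid skb (still queued) and account the freed truesize in @total.
 */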
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			udp_skb_csum_unnecessary_set(skb);
			break;
		}
	}
	return skb;
}
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
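
/* Dequeue one skb for recvmsg(): try the reader queue first, refill it from
 * sk_receive_queue while holding both locks, optionally busy-poll, and
 * finally sleep up to the socket receive timeout.
 */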
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
							err, &last);
			if (skb) {
				if (!(flags & MSG_PEEK))
					udp_skb_destructor(sk, skb);
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty_lockless(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
							err, &last);
			if (skb && !(flags & MSG_PEEK))
				udp_skb_dtor_locked(sk, skb);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty_lockless(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_udp);
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeking)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
							(struct sockaddr *)sin);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);
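
/* Break the association of a connected UDP socket: clear the peer address,
 * cached route and, unless explicitly bound, the local address and port.
 */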
int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
		inet_reset_saddr(sk);
		if (sk->sk_prot->rehash &&
		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			sk->sk_prot->rehash(sk);
	}

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);
void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}
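
/* Final enqueue step on receive: record RPS/NAPI hints and push the skb to
 * the receive queue, charging failures to the appropriate SNMP counters.
 */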
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		else
			UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
				      is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
			goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udp_queue_rcv_one_skb(sk, skb);

	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, true);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));
		ret = udp_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
	}
	return 0;
}
/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * fast check.
	 */
	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							inet_compute_pseudo);
	if (err)
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
		/* If SW calculated the value, we know it's bad */
		if (skb->csum_complete_sw)
			return 1;

		/* HW says the value is bad. Let's validate that.
		 * skb->csum is no longer the full packet checksum,
		 * so don't treat it as such.
		 */
		skb_checksum_complete_unset(skb);
	}

	return 0;
}
/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);

	ret = udp_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input, but
	 * it wants the return to be -protocol, or 0
	 */
	if (ret > 0)
		return -ret;
	return 0;
}
2341 int __udp4_lib_rcv(struct sk_buff
*skb
, struct udp_table
*udptable
,
2346 unsigned short ulen
;
2347 struct rtable
*rt
= skb_rtable(skb
);
2348 __be32 saddr
, daddr
;
2349 struct net
*net
= dev_net(skb
->dev
);
2353 * Validate the packet.
2355 if (!pskb_may_pull(skb
, sizeof(struct udphdr
)))
2356 goto drop
; /* No space for header. */
2359 ulen
= ntohs(uh
->len
);
2360 saddr
= ip_hdr(skb
)->saddr
;
2361 daddr
= ip_hdr(skb
)->daddr
;
2363 if (ulen
> skb
->len
)
2366 if (proto
== IPPROTO_UDP
) {
2367 /* UDP validates ulen. */
2368 if (ulen
< sizeof(*uh
) || pskb_trim_rcsum(skb
, ulen
))
2373 if (udp4_csum_init(skb
, uh
, proto
))
2376 sk
= skb_steal_sock(skb
, &refcounted
);
2378 struct dst_entry
*dst
= skb_dst(skb
);
2381 if (unlikely(sk
->sk_rx_dst
!= dst
))
2382 udp_sk_rx_dst_set(sk
, dst
);
2384 ret
= udp_unicast_rcv_skb(sk
, skb
, uh
);
2390 if (rt
->rt_flags
& (RTCF_BROADCAST
|RTCF_MULTICAST
))
2391 return __udp4_lib_mcast_deliver(net
, skb
, uh
,
2392 saddr
, daddr
, udptable
, proto
);
2394 sk
= __udp4_lib_lookup_skb(skb
, uh
->source
, uh
->dest
, udptable
);
2396 return udp_unicast_rcv_skb(sk
, skb
, uh
);
2398 if (!xfrm4_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
2402 /* No socket. Drop packet silently, if checksum is wrong */
2403 if (udp_lib_checksum_complete(skb
))
2406 __UDP_INC_STATS(net
, UDP_MIB_NOPORTS
, proto
== IPPROTO_UDPLITE
);
2407 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_PORT_UNREACH
, 0);
2410 * Hmm. We got an UDP packet to a port to which we
2411 * don't wanna listen. Ignore it.
2417 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
2418 proto
== IPPROTO_UDPLITE
? "Lite" : "",
2419 &saddr
, ntohs(uh
->source
),
2421 &daddr
, ntohs(uh
->dest
));
2426 * RFC1122: OK. Discards the bad packet silently (as far as
2427 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
2429 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
2430 proto
== IPPROTO_UDPLITE
? "Lite" : "",
2431 &saddr
, ntohs(uh
->source
), &daddr
, ntohs(uh
->dest
),
2433 __UDP_INC_STATS(net
, UDP_MIB_CSUMERRORS
, proto
== IPPROTO_UDPLITE
);
2435 __UDP_INC_STATS(net
, UDP_MIB_INERRORS
, proto
== IPPROTO_UDPLITE
);
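/* Receive dispatch order implemented above: a socket attached by early
 * demux is used directly; broadcast and multicast frames fan out through
 * __udp4_lib_mcast_deliver(); everything else goes through a full socket
 * lookup.  Only when no socket matches, and the checksum is valid, is an
 * ICMP port-unreachable generated.
 */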
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}
/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}
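/* Early demux runs before the route lookup, under RCU.  The socket found
 * above is only usable if its refcount can still be raised
 * (refcount_inc_not_zero()); sock_efree() as the skb destructor drops that
 * reference again once the skb is consumed.
 */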
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udp_encap_needed_key);
	}
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == AF_INET6)
				up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
			else
#endif
				up->encap_rcv = xfrm4_udp_encap_rcv;
			fallthrough;
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_tunnel_encap_enable(sk->sk_socket);
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	case UDP_GRO:
		if (valbool)
			udp_tunnel_encap_enable(sk->sk_socket);
		up->gro_enabled = valbool;
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage. */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values. */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
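/* For illustration, a minimal user-space sketch of the UDP-Lite coverage
 * options handled above (option names are the standard ones from RFC 3828
 * and <linux/udp.h>; error handling omitted; cscov counts from the start
 * of the 8-byte UDP-Lite header):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cscov = 20;
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cscov, sizeof(cscov));
 *
 * UDP_ENCAP is different: it is typically enabled by in-kernel users (xfrm,
 * l2tp) or by user-space IKE daemons on an ordinary UDP socket, e.g.
 * setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type)) with type set to
 * UDP_ENCAP_ESPINUDP.
 */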
int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}
/**
 *	udp_poll - wait for a UDP event.
 *	@file: - file struct
 *	@sock: - socket
 *	@wait: - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	it could get a return from select() indicating data available
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);
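/* The net effect for user space: with a blocking socket, poll()/select()
 * does not report readiness for a datagram whose checksum turns out to be
 * bad.  first_packet_length() discards bad-checksum packets at the head of
 * the receive queue and returns -1 when nothing valid remains, so a
 * subsequent blocking recvfrom() does not hang on data that was never
 * deliverable.
 */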
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
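/* udp_abort() is wired up as .diag_destroy below; that is the hook invoked
 * by the SOCK_DESTROY operation of the inet_diag/sock_diag interface, used
 * for example by "ss -K" to kill sockets administratively.
 */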
struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_seq_afinfo *afinfo;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	if (state->bpf_seq_afinfo)
		afinfo = state->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (afinfo->family == AF_UNSPEC ||
			    sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	if (state->bpf_seq_afinfo)
		afinfo = state->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) ||
			(afinfo->family != AF_UNSPEC &&
			 sk->sk_family != afinfo->family)));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;

	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);
void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo;
	struct udp_iter_state *state = seq->private;

	if (state->bpf_seq_afinfo)
		afinfo = state->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__udp {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct udp_sock *, udp_sk);
	uid_t uid __aligned(8);
	int bucket __aligned(8);
};

static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct udp_sock *udp_sk, uid_t uid, int bucket)
{
	struct bpf_iter__udp ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.udp_sk = udp_sk;
	ctx.uid = uid;
	ctx.bucket = bucket;
	return bpf_iter_run_prog(prog, &ctx);
}
static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;

	if (v == SEQ_START_TOKEN)
		return 0;

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	return udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
}
static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)udp_prog_seq_show(prog, &meta, v, 0, 0);
	}

	udp_seq_stop(seq, v);
}
static const struct seq_operations bpf_iter_udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= bpf_iter_udp_seq_stop,
	.show		= bpf_iter_udp_seq_show,
};
#endif

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
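/* The hash table size can thus be pinned from the kernel command line,
 * e.g. by booting with "uhash_entries=65536".  Values below
 * UDP_HTABLE_SIZE_MIN are rounded up, and 0 (or omitting the parameter)
 * keeps the automatic sizing done in udp_table_init() below.
 */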
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}
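/* Two views of the same allocation are initialized above: "hash" chains
 * sockets by local port only, while "hash2" (the second half of the array)
 * chains them by {local address, local port}.  Lookup code such as
 * __udp4_lib_mcast_deliver() switches to hash2 when a port-only chain
 * grows too long.
 */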
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
		     struct udp_sock *udp_sk, uid_t uid, int bucket)

static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct udp_iter_state *st = priv_data;
	struct udp_seq_afinfo *afinfo;
	int ret;

	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
	if (!afinfo)
		return -ENOMEM;

	afinfo->family = AF_UNSPEC;
	afinfo->udp_table = &udp_table;
	st->bpf_seq_afinfo = afinfo;
	ret = bpf_iter_init_seq_net(priv_data, aux);
	if (ret)
		kfree(afinfo);
	return ret;
}

static void bpf_iter_fini_udp(void *priv_data)
{
	struct udp_iter_state *st = priv_data;

	kfree(st->bpf_seq_afinfo);
	bpf_iter_fini_seq_net(priv_data);
}

static const struct bpf_iter_seq_info udp_seq_info = {
	.seq_ops		= &bpf_iter_udp_seq_ops,
	.init_seq_private	= bpf_iter_init_udp,
	.fini_seq_private	= bpf_iter_fini_udp,
	.seq_priv_size		= sizeof(struct udp_iter_state),
};

static struct bpf_iter_reg udp_reg_info = {
	.target			= "udp",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__udp, udp_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &udp_seq_info,
};

static void __init bpf_iter_register(void)
{
	udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
	if (bpf_iter_reg_target(&udp_reg_info))
		pr_warn("Warning: could not register bpf iterator udp\n");
}
#endif
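/* For illustration, a minimal BPF iterator program that walks these UDP
 * sockets looks roughly like the selftest-style sketch below (built against
 * vmlinux BTF with libbpf; the context fields follow struct bpf_iter__udp
 * defined above):
 *
 *	SEC("iter/udp")
 *	int dump_udp(struct bpf_iter__udp *ctx)
 *	{
 *		struct udp_sock *udp_sk = ctx->udp_sk;
 *
 *		if (!udp_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%5d %u\n",
 *			       ctx->bucket, ctx->uid);
 *		return 0;
 *	}
 *
 * User space then attaches it with bpf_program__attach_iter() and reads
 * the formatted output through the fd returned by bpf_iter_create().
 */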
void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}